diff --git a/ReleaseNotesTmp.html.orig b/ReleaseNotesTmp.html.orig
deleted file mode 100644
index b84f9d4a7..000000000
--- a/ReleaseNotesTmp.html.orig
+++ /dev/null
@@ -1,76 +0,0 @@
-<<<<<<< HEAD
-Stereo Vision Toolkit v1.3.1
-
-Changes:
-- Add ML detector using YOLO or TensorFlow
-- Add loading of stereo image pair from image files (.png, .jpg, .jpeg)
-- Add toggle to show epipolar lines for checking calibration
-- Add RGBD streaming using shared memory (for use with I3DRStereo3D Unity package)
-- Add I3DR Titania stereo camera support
-- Add Vimba camera integration
-- Add colour image capture to Basler cameras
-- Add point cloud texture options (depth colourmap or image)
-- Add colour to loading/saving stereo videos
-- Add about dialog to show software version
-- Add separate camera capture and processing threads
-- Add video saving of individual camera images or disparity
-- Add WLS filter to OpenCV BM and SGBM matchers
-- Add hostname and hostID to license message if I3DRSGM license is not found
-- Add new camera serials
-- Change load/save YAML calibration to use ROS perception supported YAML (changed from OpenCV FileStorage yaml)
-- Upgrade to OpenCV v4.5.0
-- Upgrade to CUDA 11.1
-- Upgrade to I3DRSGM v1.0.11
-- Upgrade to Pylon v6.1.1.18345 library for Basler cameras
-
-Bug fixes:
-- Fix YAML file generating invalid Q matrix
-- Fix missing link to online documentation
-- Fix overwriting image during save
-- Fix non-rectified images saved with '_rect' suffix
-- Fix maximum point cloud z value being set incorrectly
-- Fix camera capture delay when processing load is high
-- Fix I3DRSGM failing to initialise when switching to a camera with a different resolution
-
-Known issues:
-- ROS perception YAML data arrays must be on a single line in the YAML file.
-- The WLS filter can sometimes cause the speckle filter to be disabled.
-=======
-Stereo Vision Toolkit v1.3.1
-
-Changes:
-- Add ML detector using YOLO or TensorFlow
-- Add loading of stereo image pair from image files (.png, .jpg, .jpeg)
-- Add toggle to show epipolar lines for checking calibration
-- Add RGBD streaming using shared memory (for use with I3DRStereo3D Unity package)
-- Add I3DR Titania stereo camera support
-- Add Vimba camera integration
-- Add colour image capture to Basler cameras
-- Add point cloud texture options (depth colourmap or image)
-- Add colour to loading/saving stereo videos
-- Add about dialog to show software version
-- Add separate camera capture and processing threads
-- Add video saving of individual camera images or disparity
-- Add WLS filter to OpenCV BM and SGBM matchers
-- Add hostname and hostID to license message if I3DRSGM license is not found
-- Add new camera serials
-- Change load/save YAML calibration to use ROS perception supported YAML (changed from OpenCV FileStorage yaml)
-- Upgrade to OpenCV v4.5.0
-- Upgrade to CUDA 11.1
-- Upgrade to I3DRSGM v1.0.11
-- Upgrade to Pylon v6.1.1.18345 library for Basler cameras
-
-Bug fixes:
-- Fix YAML file generating invalid Q matrix
-- Fix missing link to online documentation
-- Fix overwriting image during save
-- Fix non-rectified images saved with '_rect' suffix
-- Fix maximum point cloud z value being set incorrectly
-- Fix camera capture delay when processing load is high
-- Fix I3DRSGM failing to initialise when switching to a camera with a different resolution
-
-Known issues:
-- ROS perception YAML data arrays must be on a single line in the YAML file.
-- The WLS filter can sometimes cause the speckle filter to be disabled.
->>>>>>> dev
-- Stereo videos are saved uncompressed to avoid the lossy compression that would cause issues when loading them. This results in large files.
\ No newline at end of file
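
For context on the "Add WLS filter to OpenCV BM and SGBM matchers" entry in the release notes above, the sketch below shows how WLS disparity post-filtering is typically wired up with OpenCV's ximgproc module (opencv_contrib). This is a minimal illustration rather than the toolkit's actual code; the matcher parameters and filter settings are assumptions.

#include <opencv2/opencv.hpp>
#include <opencv2/ximgproc.hpp>

// Compute a WLS-filtered disparity map from a rectified stereo pair.
cv::Mat filteredDisparity(const cv::Mat &left, const cv::Mat &right)
{
    // A right-view matcher is needed so the filter can estimate confidence
    cv::Ptr<cv::StereoBM> left_matcher = cv::StereoBM::create(128, 21);
    cv::Ptr<cv::StereoMatcher> right_matcher =
        cv::ximgproc::createRightMatcher(left_matcher);

    cv::Mat left_disp, right_disp, filtered;
    left_matcher->compute(left, right, left_disp);
    right_matcher->compute(right, left, right_disp);

    cv::Ptr<cv::ximgproc::DisparityWLSFilter> wls =
        cv::ximgproc::createDisparityWLSFilter(left_matcher);
    wls->setLambda(8000.0);  // smoothing strength (illustrative value)
    wls->setSigmaColor(1.5); // edge sensitivity (illustrative value)
    wls->filter(left_disp, left, filtered, right_disp);
    return filtered;
}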
diff --git a/resources/ml/coco/coco.names.orig b/resources/ml/coco/coco.names.orig
deleted file mode 100644
index 5d037108a..000000000
--- a/resources/ml/coco/coco.names.orig
+++ /dev/null
@@ -1,162 +0,0 @@
-<<<<<<< HEAD
-person
-bicycle
-car
-motorbike
-aeroplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-sofa
-pottedplant
-bed
-diningtable
-toilet
-tvmonitor
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-=======
-person
-bicycle
-car
-motorbike
-aeroplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-sofa
-pottedplant
-bed
-diningtable
-toilet
-tvmonitor
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
->>>>>>> dev
-toothbrush
\ No newline at end of file
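
The list above maps a network's class indices to human-readable labels, one per line (note the missing trailing newline recorded by the diff). A minimal sketch of loading such a list, with the file path left as a placeholder:

#include <fstream>
#include <string>
#include <vector>

std::vector<std::string> loadClassNames(const std::string &path)
{
    std::vector<std::string> names;
    std::ifstream file(path);
    std::string line;
    // The vector index corresponds to the class id output by the network
    while (std::getline(file, line)) {
        if (!line.empty())
            names.push_back(line);
    }
    return names;
}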
diff --git a/resources/ml/coco/yolov4-tiny.cfg.orig b/resources/ml/coco/yolov4-tiny.cfg.orig
deleted file mode 100644
index 48a29d922..000000000
--- a/resources/ml/coco/yolov4-tiny.cfg.orig
+++ /dev/null
@@ -1,564 +0,0 @@
-<<<<<<< HEAD
-[net]
-# Testing
-#batch=1
-#subdivisions=1
-# Training
-batch=64
-subdivisions=1
-width=416
-height=416
-channels=3
-momentum=0.9
-decay=0.0005
-angle=0
-saturation = 1.5
-exposure = 1.5
-hue=.1
-
-learning_rate=0.00261
-burn_in=1000
-max_batches = 500200
-policy=steps
-steps=400000,450000
-scales=.1,.1
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=2
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=2
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=leaky
-
-##################################
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-
-
-[yolo]
-mask = 3,4,5
-anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
-classes=80
-num=6
-jitter=.3
-scale_x_y = 1.05
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-ignore_thresh = .7
-truth_thresh = 1
-random=0
-resize=1.5
-nms_kind=greedynms
-beta_nms=0.6
-
-[route]
-layers = -4
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[upsample]
-stride=2
-
-[route]
-layers = -1, 23
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-[yolo]
-mask = 1,2,3
-anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
-classes=80
-num=6
-jitter=.3
-scale_x_y = 1.05
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-ignore_thresh = .7
-truth_thresh = 1
-random=0
-resize=1.5
-nms_kind=greedynms
-=======
-[net]
-# Testing
-#batch=1
-#subdivisions=1
-# Training
-batch=64
-subdivisions=1
-width=416
-height=416
-channels=3
-momentum=0.9
-decay=0.0005
-angle=0
-saturation = 1.5
-exposure = 1.5
-hue=.1
-
-learning_rate=0.00261
-burn_in=1000
-max_batches = 500200
-policy=steps
-steps=400000,450000
-scales=.1,.1
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=2
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=2
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=32
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=64
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers=-1
-groups=2
-group_id=1
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -1,-2
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[route]
-layers = -6,-1
-
-[maxpool]
-size=2
-stride=2
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=leaky
-
-##################################
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-batch_normalize=1
-filters=512
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-
-
-[yolo]
-mask = 3,4,5
-anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
-classes=80
-num=6
-jitter=.3
-scale_x_y = 1.05
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-ignore_thresh = .7
-truth_thresh = 1
-random=0
-resize=1.5
-nms_kind=greedynms
-beta_nms=0.6
-
-[route]
-layers = -4
-
-[convolutional]
-batch_normalize=1
-filters=128
-size=1
-stride=1
-pad=1
-activation=leaky
-
-[upsample]
-stride=2
-
-[route]
-layers = -1, 23
-
-[convolutional]
-batch_normalize=1
-filters=256
-size=3
-stride=1
-pad=1
-activation=leaky
-
-[convolutional]
-size=1
-stride=1
-pad=1
-filters=255
-activation=linear
-
-[yolo]
-mask = 1,2,3
-anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
-classes=80
-num=6
-jitter=.3
-scale_x_y = 1.05
-cls_normalizer=1.0
-iou_normalizer=0.07
-iou_loss=ciou
-ignore_thresh = .7
-truth_thresh = 1
-random=0
-resize=1.5
-nms_kind=greedynms
->>>>>>> dev
-beta_nms=0.6
\ No newline at end of file
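
The Darknet config above describes a yolov4-tiny network (416x416 input, 80 classes, two [yolo] heads), which is the form consumed by OpenCV's DNN module. A hedged sketch of loading it follows; the weights filename is an assumption, and the toolkit's ML detector may load models differently.

#include <opencv2/dnn.hpp>

cv::dnn::Net loadYoloV4Tiny()
{
    cv::dnn::Net net = cv::dnn::readNetFromDarknet(
        "resources/ml/coco/yolov4-tiny.cfg",
        "resources/ml/coco/yolov4-tiny.weights");
    // The cfg declares width=416 and height=416, so frames are resized to match:
    // cv::Mat blob = cv::dnn::blobFromImage(frame, 1.0/255.0, cv::Size(416, 416),
    //                                       cv::Scalar(), true, false);
    return net;
}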
diff --git a/src/aboutdialog.cpp.orig b/src/aboutdialog.cpp.orig
deleted file mode 100644
index f0efd831d..000000000
--- a/src/aboutdialog.cpp.orig
+++ /dev/null
@@ -1,41 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright I3D Robotics Ltd, 2020
-* Author: Ben Knight (bknight@i3drobotics.com)
-*/
-
-#include "aboutdialog.h"
-#include "ui_aboutdialog.h"
-
-AboutDialog::AboutDialog(QWidget *parent)
- : QDialog(parent), ui(new Ui::AboutDialog) {
- ui->setupUi(this);
-
-}
-
-void AboutDialog::setVersion(QString version){
- ui->lblVersion->setText(version);
-}
-
-AboutDialog::~AboutDialog() { delete ui; }
-=======
-/*
-* Copyright I3D Robotics Ltd, 2020
-* Author: Ben Knight (bknight@i3drobotics.com)
-*/
-
-#include "aboutdialog.h"
-#include "ui_aboutdialog.h"
-
-AboutDialog::AboutDialog(QWidget *parent)
- : QDialog(parent), ui(new Ui::AboutDialog) {
- ui->setupUi(this);
-
-}
-
-void AboutDialog::setVersion(QString version){
- ui->lblVersion->setText(version);
-}
-
-AboutDialog::~AboutDialog() { delete ui; }
->>>>>>> dev
diff --git a/src/aboutdialog.h.orig b/src/aboutdialog.h.orig
deleted file mode 100644
index 14ab5b52a..000000000
--- a/src/aboutdialog.h.orig
+++ /dev/null
@@ -1,73 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright I3D Robotics Ltd, 2020
-* Author: Ben Knight (bknight@i3drobotics.com)
-*/
-
-#ifndef ABOUTDIALOG_H
-#define ABOUTDIALOG_H
-
-#include <QDialog>
-#include <QString>
-
-namespace Ui {
-class AboutDialog;
-}
-
-//! About dialog
-/*!
- Dialog for showing information about the software
-*/
-class AboutDialog : public QDialog
-{
- Q_OBJECT
-
-public:
- explicit AboutDialog(QWidget *parent = 0);
- void setVersion(QString version);
- ~AboutDialog();
-
-private:
- //! QT UI dialog
- Ui::AboutDialog *ui;
-
-};
-
-#endif // ABOUTDIALOG_H
-=======
-/*
-* Copyright I3D Robotics Ltd, 2020
-* Author: Ben Knight (bknight@i3drobotics.com)
-*/
-
-#ifndef ABOUTDIALOG_H
-#define ABOUTDIALOG_H
-
-#include <QDialog>
-#include <QString>
-
-namespace Ui {
-class AboutDialog;
-}
-
-//! About dialog
-/*!
- Dialog for showing information about the software
-*/
-class AboutDialog : public QDialog
-{
- Q_OBJECT
-
-public:
- explicit AboutDialog(QWidget *parent = 0);
- void setVersion(QString version);
- ~AboutDialog();
-
-private:
- //! QT UI dialog
- Ui::AboutDialog *ui;
-
-};
-
-#endif // ABOUTDIALOG_H
->>>>>>> dev
diff --git a/src/aboutdialog.ui.orig b/src/aboutdialog.ui.orig
deleted file mode 100644
index c1a524943..000000000
--- a/src/aboutdialog.ui.orig
+++ /dev/null
@@ -1,103 +0,0 @@
-<<<<<<< HEAD
-<?xml version="1.0" encoding="UTF-8"?>
-<ui version="4.0">
- <class>AboutDialog</class>
- <widget class="QDialog" name="AboutDialog">
-  <property name="geometry">
-   <rect>
-    <x>0</x>
-    <y>0</y>
-    <width>200</width>
-    <height>61</height>
-   </rect>
-  </property>
-  <property name="sizePolicy">
-   <sizepolicy hsizetype="Preferred" vsizetype="Preferred">
-    <horstretch>0</horstretch>
-    <verstretch>0</verstretch>
-   </sizepolicy>
-  </property>
-  <property name="windowTitle">
-   <string>About</string>
-  </property>
-  <layout class="QVBoxLayout" name="verticalLayout">
-   <item>
-    <widget class="QLabel" name="lblVersion">
-     <property name="sizePolicy">
-      <sizepolicy hsizetype="Preferred" vsizetype="Preferred">
-       <horstretch>0</horstretch>
-       <verstretch>0</verstretch>
-      </sizepolicy>
-     </property>
-     <property name="text">
-      <string>Version Placeholder</string>
-     </property>
-     <property name="alignment">
-      <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set>
-     </property>
-    </widget>
-   </item>
-   <item>
-    <widget class="QLabel" name="lblCopyright">
-     <property name="text">
-      <string>(C) Industrial 3D Robotics 2021</string>
-     </property>
-    </widget>
-   </item>
-  </layout>
- </widget>
- <resources/>
- <connections/>
-</ui>
-=======
-<?xml version="1.0" encoding="UTF-8"?>
-<ui version="4.0">
- <class>AboutDialog</class>
- <widget class="QDialog" name="AboutDialog">
-  <property name="geometry">
-   <rect>
-    <x>0</x>
-    <y>0</y>
-    <width>200</width>
-    <height>61</height>
-   </rect>
-  </property>
-  <property name="sizePolicy">
-   <sizepolicy hsizetype="Preferred" vsizetype="Preferred">
-    <horstretch>0</horstretch>
-    <verstretch>0</verstretch>
-   </sizepolicy>
-  </property>
-  <property name="windowTitle">
-   <string>About</string>
-  </property>
-  <layout class="QVBoxLayout" name="verticalLayout">
-   <item>
-    <widget class="QLabel" name="lblVersion">
-     <property name="sizePolicy">
-      <sizepolicy hsizetype="Preferred" vsizetype="Preferred">
-       <horstretch>0</horstretch>
-       <verstretch>0</verstretch>
-      </sizepolicy>
-     </property>
-     <property name="text">
-      <string>Version Placeholder</string>
-     </property>
-     <property name="alignment">
-      <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set>
-     </property>
-    </widget>
-   </item>
-   <item>
-    <widget class="QLabel" name="lblCopyright">
-     <property name="text">
-      <string>(C) Industrial 3D Robotics 2021</string>
-     </property>
-    </widget>
-   </item>
-  </layout>
- </widget>
- <resources/>
- <connections/>
-</ui>
->>>>>>> dev
diff --git a/src/camera/cameracontrol/cameracontrol/cameracontrol.ino.orig b/src/camera/cameracontrol/cameracontrol/cameracontrol.ino.orig
deleted file mode 100644
index f14a37750..000000000
--- a/src/camera/cameracontrol/cameracontrol/cameracontrol.ino.orig
+++ /dev/null
@@ -1,235 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright I3D Robotics Ltd, 2020
-* Author: Ben Knight (bknight@i3drobotics.com)
-*/
-
-#define PHOBOS_USB_TRIGGER_1 2
-#define PHOBOS_USB_TRIGGER_2 2
-
-#define PHOBOS_GIGE_TRIGGER_1 3
-#define PHOBOS_GIGE_TRIGGER_2 2
-
-#define TITANIA_USB_TRIGGER_1 1
-#define TITANIA_USB_TRIGGER_2 3
-
-//Choose trigger pins based on the camera being used
-#define CAMERA_TRIGGER_1 TITANIA_USB_TRIGGER_1
-#define CAMERA_TRIGGER_2 TITANIA_USB_TRIGGER_2
-
-//Comment out line when not using the IMU
-#define USE_IMU
-#ifdef USE_IMU
- #include <Wire.h>
- #include <Adafruit_Sensor.h>
- #include <Adafruit_BNO055.h>
- #include <utility/imumaths.h>
-
- //Setup imu device
- Adafruit_BNO055 bno = Adafruit_BNO055(55);
-#endif
-
-double frame_delay; // time between triggers (1/fps)
-int trigger_time = 10; // time for trigger to be registered by camera
-double fps = 5; // initial fps
-String inString = ""; // string to hold input
-unsigned long lastTriggerTime = 0;
-unsigned long timeSinceTrigger;
-#ifdef USE_IMU
- bool imuInit = false;
-#endif
-
-void setup() {
- Serial.begin(115200);
- #ifdef USE_IMU
- // Initialise the IMU sensor
- if(!bno.begin())
- {
- // Problem detecting the BNO055 ... check your connections
- // serial will return -1000 in all axes in loop to indicate the problem
- imuInit = false;
- } else {
- bno.setExtCrystalUse(true);
- imuInit = true;
- }
- #endif
- pinMode(CAMERA_TRIGGER_1, OUTPUT);
- pinMode(CAMERA_TRIGGER_2, OUTPUT);
- pinMode(LED_BUILTIN,OUTPUT);
-
- lastTriggerTime = millis();
-}
-
-void loop() {
- // Check time since last trigger
- timeSinceTrigger = millis() - lastTriggerTime;
- // Calculate frame delay from fps (can change based on user input)
- frame_delay = 1000/fps;
-
- // Wait for time to trigger the camera (without stopping the program)
- if (timeSinceTrigger > frame_delay){
- // Trigger camera (pulse high, then low)
- digitalWrite(LED_BUILTIN, HIGH);
- digitalWrite(CAMERA_TRIGGER_1, HIGH);
- digitalWrite(CAMERA_TRIGGER_2, HIGH);
- lastTriggerTime = millis();
- delay(trigger_time); // Wait small time for high to be registered by camera
- digitalWrite(LED_BUILTIN, LOW);
- digitalWrite(CAMERA_TRIGGER_1, LOW);
- digitalWrite(CAMERA_TRIGGER_2, LOW);
- }
-
- // Read trigger frame rate from serial
- if (Serial.available() > 0) { //Only read if data is available
- int inChar = Serial.read();
- if (isDigit(inChar)) {
- // Convert the incoming byte to a char and add it to the string:
- inString += (char)inChar;
- }
- // On newline, convert the accumulated string to the new fps value:
- if (inChar == '\n') {
- fps = inString.toInt();
- // clear the string for new input:
- inString = "";
- }
- #ifdef USE_IMU
- } else {
- if (imuInit){
- // Get a new IMU sensor event
- sensors_event_t event;
- bno.getEvent(&event);
-
- // Display the floating point data
- Serial.print(event.orientation.x, 4);
- Serial.print(",");
- Serial.print(event.orientation.y, 4);
- Serial.print(",");
- Serial.print(event.orientation.z, 4);
- Serial.println();
- } else {
- // IMU failed to initialise, return -1000 in all axes
- Serial.println("-1000,-1000,-1000");
- }
- }
- #else
- }
- #endif
-}
-=======
-/*
-* Copyright I3D Robotics Ltd, 2020
-* Author: Ben Knight (bknight@i3drobotics.com)
-*/
-
-#define PHOBOS_USB_TRIGGER_1 2
-#define PHOBOS_USB_TRIGGER_2 2
-
-#define PHOBOS_GIGE_TRIGGER_1 3
-#define PHOBOS_GIGE_TRIGGER_2 2
-
-#define TITANIA_USB_TRIGGER_1 1
-#define TITANIA_USB_TRIGGER_2 3
-
-//Choose trigger pins based on the camera being used
-#define CAMERA_TRIGGER_1 TITANIA_USB_TRIGGER_1
-#define CAMERA_TRIGGER_2 TITANIA_USB_TRIGGER_2
-
-//Comment out line when not using the IMU
-#define USE_IMU
-#ifdef USE_IMU
- #include <Wire.h>
- #include <Adafruit_Sensor.h>
- #include <Adafruit_BNO055.h>
- #include <utility/imumaths.h>
-
- //Setup imu device
- Adafruit_BNO055 bno = Adafruit_BNO055(55);
-#endif
-
-double frame_delay; // time between triggers (1/fps)
-int trigger_time = 10; // time for trigger to be registered by camera
-double fps = 5; // initial fps
-String inString = ""; // string to hold input
-unsigned long lastTriggerTime = 0;
-unsigned long timeSinceTrigger;
-#ifdef USE_IMU
- bool imuInit = false;
-#endif
-
-void setup() {
- Serial.begin(115200);
- #ifdef USE_IMU
- // Initialise the IMU sensor
- if(!bno.begin())
- {
- // Problem detecting the BNO055 ... check your connections
- // serial will return -1000 in all axes in loop to indicate the problem
- imuInit = false;
- } else {
- bno.setExtCrystalUse(true);
- imuInit = true;
- }
- #endif
- pinMode(CAMERA_TRIGGER_1, OUTPUT);
- pinMode(CAMERA_TRIGGER_2, OUTPUT);
- pinMode(LED_BUILTIN,OUTPUT);
-
- lastTriggerTime = millis();
-}
-
-void loop() {
- // Check time since last trigger
- timeSinceTrigger = millis() - lastTriggerTime;
- // Calculate frame delay from fps (can change based on user input)
- frame_delay = 1000/fps;
-
- // Wait for time to trigger the camera (without stopping the program)
- if (timeSinceTrigger > frame_delay){
- // Trigger camera (pulse high, then low)
- digitalWrite(LED_BUILTIN, HIGH);
- digitalWrite(CAMERA_TRIGGER_1, HIGH);
- digitalWrite(CAMERA_TRIGGER_2, HIGH);
- lastTriggerTime = millis();
- delay(trigger_time); // Wait small time for high to be registered by camera
- digitalWrite(LED_BUILTIN, LOW);
- digitalWrite(CAMERA_TRIGGER_1, LOW);
- digitalWrite(CAMERA_TRIGGER_2, LOW);
- }
-
- // Read trigger frame rate from serial
- if (Serial.available() > 0) { //Only read if data is available
- int inChar = Serial.read();
- if (isDigit(inChar)) {
- // Convert the incoming byte to a char and add it to the string:
- inString += (char)inChar;
- }
- // On newline, convert the accumulated string to the new fps value:
- if (inChar == '\n') {
- fps = inString.toInt();
- // clear the string for new input:
- inString = "";
- }
- #ifdef USE_IMU
- } else {
- if (imuInit){
- // Get a new IMU sensor event
- sensors_event_t event;
- bno.getEvent(&event);
-
- // Display the floating point data
- Serial.print(event.orientation.x, 4);
- Serial.print(",");
- Serial.print(event.orientation.y, 4);
- Serial.print(",");
- Serial.print(event.orientation.z, 4);
- Serial.println();
- } else {
- // IMU failed to initialise, return -1000 in all axes
- Serial.println("-1000,-1000,-1000");
- }
- }
- #else
- }
- #endif
-}
->>>>>>> dev
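
The sketch above accepts a trigger rate as ASCII digits terminated by '\n' and, when the IMU is enabled, streams back comma-separated orientation triplets ("-1000,-1000,-1000" if the sensor failed to initialise). Below is a minimal host-side counterpart using Qt Serial Port; the 115200 baud rate matches Serial.begin() in the sketch, but using QSerialPort here is an assumption about how the host talks to the board.

#include <QSerialPort>

bool setTriggerRate(QSerialPort &port, int fps)
{
    // cameracontrol.ino accumulates digits until '\n', then applies toInt()
    return port.write(QByteArray::number(fps) + "\n") > 0;
}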
diff --git a/src/camera/cameracontrol/imucontrol/imucontrol/imucontrol.ino.orig b/src/camera/cameracontrol/imucontrol/imucontrol/imucontrol.ino.orig
deleted file mode 100644
index 2fd49ae60..000000000
--- a/src/camera/cameracontrol/imucontrol/imucontrol/imucontrol.ino.orig
+++ /dev/null
@@ -1,87 +0,0 @@
-<<<<<<< HEAD
-#include <Wire.h>
-#include <Adafruit_Sensor.h>
-#include <Adafruit_BNO055.h>
-#include <utility/imumaths.h>
-
-Adafruit_BNO055 bno = Adafruit_BNO055(55);
-
-void setup(void)
-{
- Serial.begin(9600);
- Serial.println("Orientation Sensor Test"); Serial.println("");
-
- /* Initialise the sensor */
- if(!bno.begin())
- {
- /* There was a problem detecting the BNO055 ... check your connections */
- Serial.print("Ooops, no BNO055 detected ... Check your wiring or I2C ADDR!");
- while(1);
- }
-
- delay(1000);
-
- bno.setExtCrystalUse(true);
-}
-
-void loop(void)
-{
- /* Get a new sensor event */
- sensors_event_t event;
- bno.getEvent(&event);
-
- /* Display the floating point data */
- Serial.print("X: ");
- Serial.print(event.orientation.x, 4);
- Serial.print("\tY: ");
- Serial.print(event.orientation.y, 4);
- Serial.print("\tZ: ");
- Serial.print(event.orientation.z, 4);
- Serial.println("");
-
- delay(100);
-}
-=======
-#include <Wire.h>
-#include <Adafruit_Sensor.h>
-#include <Adafruit_BNO055.h>
-#include <utility/imumaths.h>
-
-Adafruit_BNO055 bno = Adafruit_BNO055(55);
-
-void setup(void)
-{
- Serial.begin(9600);
- Serial.println("Orientation Sensor Test"); Serial.println("");
-
- /* Initialise the sensor */
- if(!bno.begin())
- {
- /* There was a problem detecting the BNO055 ... check your connections */
- Serial.print("Ooops, no BNO055 detected ... Check your wiring or I2C ADDR!");
- while(1);
- }
-
- delay(1000);
-
- bno.setExtCrystalUse(true);
-}
-
-void loop(void)
-{
- /* Get a new sensor event */
- sensors_event_t event;
- bno.getEvent(&event);
-
- /* Display the floating point data */
- Serial.print("X: ");
- Serial.print(event.orientation.x, 4);
- Serial.print("\tY: ");
- Serial.print(event.orientation.y, 4);
- Serial.print("\tZ: ");
- Serial.print(event.orientation.z, 4);
- Serial.println("");
-
- delay(100);
-}
->>>>>>> dev
diff --git a/src/camera/cameravimba.cpp.orig b/src/camera/cameravimba.cpp.orig
deleted file mode 100644
index 9dd51cd35..000000000
--- a/src/camera/cameravimba.cpp.orig
+++ /dev/null
@@ -1,1211 +0,0 @@
-<<<<<<< HEAD
-#include "cameravimba.h"
-
-CameraVimba::CameraVimba(QObject *parent) : QObject(parent), system(VimbaSystem::GetInstance())
-{
-}
-
-
-void CameraVimba::assignThread(QThread *thread)
-{
- moveToThread(thread);
- connect(this, SIGNAL(finished()), thread, SLOT(quit()));
- connect(this, SIGNAL(finished()), this, SLOT(deleteLater()));
- connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));
- thread->start();
-}
-
-void CameraVimba::close() {
-
- qDebug() << "Closing camera";
-
- stopCapture();
- camera->Close();
-
- connected = false;
-}
-
-bool CameraVimba::initCamera(std::string camera_serial,int binning, bool trigger, int fps)
-{
- this->camera_serial = camera_serial;
- this->binning = binning;
- this->trigger = trigger;
- this->fps = fps;
- connected = false;
-
- CameraPtrVector cameras;
- system.GetCameras(cameras);
-
- bool cameraFind = false;
- for (const CameraPtr &trial_camera : cameras){
- std::string serial;
- trial_camera->GetSerialNumber(serial);
-
- if(serial == camera_serial){
- cameraFind = true;
- camera = trial_camera;
- break;
- }
- }
-
- if (!cameraFind){
- std::cerr << "Failed to find camera with name: " << camera_serial << std::endl;
- return false;
- }
-
- // Print the model name of the camera.
- std::string model;
- camera->GetModel(model);
- qDebug() << "Using device " << model.c_str();
-
- auto error = camera->Open(VmbAccessModeFull);
-
- if (error == VmbErrorSuccess){
-
- bool success = changeFPS(fps);
- if(!success)
- qDebug() << "Failed to set FPS";
-
- // Currently binning is not supported
- //bool error = setBinning(binning);
- success = enableTrigger(trigger);
- if(!success)
- qDebug() << "Failed to set trigger mode";
-
- frame_observer = shared_ptr<FrameObserver>(new FrameObserver(camera));
- connected = true;
-
- } else {
- qDebug() << "Failed to open camera";
- connected = false;
- }
-
- return connected;
-}
-
-void CameraVimba::startCapture(void){
-
- if(capturing) return;
-
- auto res = camera->StartContinuousImageAcquisition(15, frame_observer);
-
- if(res != VmbErrorSuccess){
- qDebug() << "Couldn't start capture!" << res;
- }else{
- connect(frame_observer.get(), SIGNAL( frameReady(int) ), this, SLOT( onFrame(int) ) );
- capturing = true;
- }
-}
-
-void CameraVimba::stopCapture(void){
- if(!capturing) return;
-
- camera->StopContinuousImageAcquisition();
- disconnect(frame_observer.get(), SIGNAL( frameReady(int) ), this, SLOT( onFrame(int) ) );
- capturing = false;
-}
-
-void CameraVimba::onFrame(int status){
- if(!connected){
- return;
- }
-
- // Pick up frame
- FramePtr pFrame = frame_observer->getFrame();
- if( pFrame == nullptr){
- qDebug() << "frame pointer is NULL, late frame ready message";
- return;
- }
-
- // See if it is not corrupt
- if( status == VmbFrameStatusComplete ){
- VmbUchar_t *pBuffer;
- VmbErrorType err = pFrame.get()->GetImage(pBuffer);
- if( VmbErrorSuccess != err )
- {
- qDebug() << "Failed to get image pointer";
- return;
- }
-
- VmbPixelFormatType ePixelFormat = VmbPixelFormatMono8;
- err = pFrame->GetPixelFormat( ePixelFormat );
- if ( err != VmbErrorSuccess)
- {
- qDebug() << "Failed to get pixel format";
- return;
- }
-
- if (( ePixelFormat != VmbPixelFormatMono8 ))
- {
- qDebug() << "Invalid pixel format";
- return;
- }
-
- VmbUint32_t nImageSize = 0;
- err = pFrame->GetImageSize( nImageSize );
- if ( err != VmbErrorSuccess )
- {
- qDebug() << "Failed to get image size";
- return;
- }
-
- VmbUint32_t nWidth = 0;
- err = pFrame->GetWidth( nWidth );
- if ( err != VmbErrorSuccess ){
- qDebug() << "Failed to get image width";
- return;
- }
-
- VmbUint32_t nHeight = 0;
- err = pFrame->GetHeight( nHeight );
- if ( err != VmbErrorSuccess )
- {
- qDebug() << "Failed to get image height";
- return;
- }
-
- VmbUchar_t *pImage = nullptr;
- err = pFrame->GetImage( pImage );
- if ( err == VmbErrorSuccess)
- {
- frame_mutex.lock();
- cv::Mat image_temp = cv::Mat(static_cast<int>(nHeight), static_cast<int>(nWidth), CV_8UC1, pImage );
- image_temp.copyTo(image);
- frame_mutex.unlock();
-
- }
-
- camera->QueueFrame(pFrame);
- }
-
- emit captured();
-}
-
-VmbError_t CameraVimba::getStringFeature(std::string name, std::string &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- feature->GetValue(res);
- }
-
- return error;
-}
-
-VmbError_t CameraVimba::getIntFeature(std::string name, long long &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- VmbInt64_t out;
- feature->GetValue(out);
- res = static_cast<long long>(out);
- }
-
- return error;
-}
-
-VmbError_t CameraVimba::getBoolFeature(std::string name, bool &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- feature->GetValue(res);
- }
-
- return error;
-}
-
-VmbError_t CameraVimba::getDoubleFeature(std::string name, double &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- feature->GetValue(res);
- }
-
- return error;
-}
-
-void CameraVimba::getImageSize(int &width, int &height, int &bitdepth)
-{
- //get image size
- long long h = 0, w = 0;
- auto err_h = getIntFeature("Height", h);
- auto err_w = getIntFeature("Width", w);
-
- std::string format;
- auto err_fmt = getStringFeature("PixelSize", format);
-
- if (err_h == VmbErrorSuccess && err_w == VmbErrorSuccess && err_fmt == VmbErrorSuccess){
- height = static_cast<int>(h);
- width = static_cast<int>(w);
- if(format == "Bpp8")
- bitdepth = 8;
- else if(format == "Bpp16")
- bitdepth = 16;
- } else {
- qDebug() << "Failed to get width / height";
- }
-}
-
-bool CameraVimba::changeFPS(int fps){
- bool res = true;
- bool capture_state = capturing;
-
- if(capture_state){
- stopCapture();
- }
- // unlimited frame rate if set to 0
- if (fps > 0){
- // Order is important!
- res &= enableFPS(true);
- if(!res)
- qDebug() << "Failed to enable FPS";
- res &= setFPS(fps);
- if(!res)
- qDebug() << "Failed to set FPS";
-
- this->fps = fps;
- } else {
- res &= enableFPS(false);
- this->fps = fps;
- }
- if(capture_state){
- startCapture();
- }
- return res;
-}
-
-bool CameraVimba::setFPS(int fps){
- FeaturePtr feature;
- qDebug() << "Setting fps: " << fps;
- camera->GetFeatureByName("AcquisitionFrameRate", feature);
- auto error = feature->SetValue(static_cast<double>(fps));
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::enableFPS(bool enable){
- if (enable){
- qDebug() << "Enabling fps";
- } else {
- qDebug() << "Disabling fps";
- }
-
- FeaturePtr feature;
- camera->GetFeatureByName("AcquisitionFrameRateEnable", feature);
- auto error = feature->SetValue(enable);
-
- bool state;
- feature->GetValue(state);
-
- return (error == VmbErrorSuccess) && (state == enable);
-}
-
-bool CameraVimba::enableTrigger(bool enable){
- FeaturePtr feature;
- VmbError_t error;
- std::string state;
- std::string requested_trigger_mode;
- std::string requested_line_selector;
- std::string requested_line_mode;
- std::string requested_trigger_source;
- std::string requested_trigger_selector;
- bool success = true;
-
- if(enable){
- qDebug() << "Enabling trigger";
- requested_trigger_mode = "On";
- requested_trigger_source = "Line0";
- requested_trigger_selector = "FrameStart";
- requested_line_selector = "Line0";
- requested_line_mode = "Input";
- } else {
- qDebug() << "Disabling trigger";
- requested_trigger_mode = "Off";
- requested_trigger_source = "Software";
- requested_trigger_selector = "FrameStart";
- requested_line_selector = "Line0";
- requested_line_mode = "Input";
- }
-
- if (enable){
- /*
- camera->GetFeatureByName("LineSelector", feature);
- error = feature->SetValue(requested_line_selector.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_line_selector);
-
- if (!success){
- qDebug() << "Failed to set line selector";
- return success;
- }
-
- camera->GetFeatureByName("LineMode", feature);
- error = feature->SetValue(requested_line_mode.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_line_mode);
-
- if (!success){
- qDebug() << "Failed to set line mode";
- return success;
- }
- */
- }
-
- camera->GetFeatureByName("TriggerSelector", feature);
- error = feature->SetValue(requested_trigger_selector.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_trigger_selector);
-
- if (!success){
- qDebug() << "Failed to set trigger selector";
- return success;
- }
-
- camera->GetFeatureByName("TriggerSource", feature);
- error = feature->SetValue(requested_trigger_source.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_trigger_source);
-
- if (!success){
- qDebug() << "Failed to set trigger source";
- return success;
- }
-
- camera->GetFeatureByName("TriggerMode", feature);
- error = feature->SetValue(requested_trigger_mode.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_trigger_mode);
-
- if (!success){
- qDebug() << "Failed to set trigger mode";
- qDebug() << state.c_str();
- return success;
- }
-
- return success;
-}
-
-bool CameraVimba::changeBinning(int binning){
- close();
- return (initCamera(this->camera_serial,binning,this->trigger,this->fps));
-}
-
-double CameraVimba::getFPS(void){
- FeaturePtr feature;
- double fps;
- auto success = getDoubleFeature("AcquisitionFrameRate", fps);
- if(success != VmbErrorSuccess){
- fps = -1;
- }
- return fps;
-}
-
-
-bool CameraVimba::setBinning(int binning)
-{
- FeaturePtr feature;
- bool success = true;
- camera->GetFeatureByName("BinningHorizontal", feature);
- // Camera doesn't support binning
- if(feature.get() == nullptr) return false;
-
- success &= (feature->SetValue(binning) == VmbErrorSuccess);
- camera->GetFeatureByName("BinningVertical", feature);
- success &= (feature->SetValue(binning) == VmbErrorSuccess);
-
- return success;
-}
-
-bool CameraVimba::setExposure(double exposure)
-{
- FeaturePtr feature;
- //int fps = static_cast(getFPS());
- camera.get()->GetFeatureByName("ExposureTime", feature);
- double exposure_scaled = exposure * 1000;
- qDebug() << "Setting exposure to: " << exposure_scaled;
- qDebug() << "Vimba exposure to: " << exposure_scaled;
- auto error = feature->SetValue(exposure_scaled); // microseconds
-
- if(error != VmbErrorSuccess){
- qDebug() << "Failed to set exposure";
- }else{
- /*
- if(fps < 30 && 1.0/(1000.0*exposure) < 1.0/30){
- changeFPS(30);
- }
- */
- }
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::enableAutoExposure(bool enable)
-{
- std::string exposure_mode = "Off";
- if(enable) exposure_mode = "Continuous";
-
- FeaturePtr feature;
-
- if (enable){
- qDebug() << "Enabling auto exposure";
- } else {
- qDebug() << "Disabling auto exposure";
- }
-
- camera->GetFeatureByName("ExposureAuto", feature);
- auto error = feature->SetValue(exposure_mode.c_str());
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::enableAutoGain(bool enable)
-{
-
- if (enable){
- qDebug() << "Enabling auto gain";
- } else {
- qDebug() << "Disabling auto gain";
- }
-
- std::string gain_mode = "Off";
- if(enable) gain_mode = "Continuous";
-
- FeaturePtr feature;
- camera->GetFeatureByName("GainAuto", feature);
- auto error = feature->SetValue(gain_mode.c_str());
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::setGain(int gain)
-{
-
- enableAutoGain(false);
-
- FeaturePtr feature;
- camera.get()->GetFeatureByName("Gain", feature);
-
- qDebug() << "Setting gain to: " << gain;
-
- double min_gain, max_gain;
- feature->GetRange(min_gain, max_gain);
-
- qDebug() << "Vimba gain to: " << max_gain*static_cast(gain)/100.0;
-
- auto error = feature->SetValue(max_gain*static_cast(gain)/100.0);
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::capture(void)
-{
- bool res = false;
- QElapsedTimer timer;
- timer.start();
-
- if (connected){
- // get image from cameras
- AVT::VmbAPI::FramePtr pFrame;
- VmbErrorType capture_err;
- VmbFrameStatusType status = VmbFrameStatusIncomplete;
-
- //qDebug() << "Init: " << timer.nsecsElapsed() / 1e9;
-
- capture_err = camera->AcquireSingleImage( pFrame, 5000 );
- //timer.start();
-
- if ( capture_err == VmbErrorSuccess)
- {
- capture_err = pFrame->GetReceiveStatus( status );
- if ( capture_err == VmbErrorSuccess
- && status == VmbFrameStatusComplete)
- {
- VmbPixelFormatType ePixelFormat = VmbPixelFormatMono8;
- capture_err = pFrame->GetPixelFormat( ePixelFormat );
- if ( capture_err == VmbErrorSuccess)
- {
- if ( ( ePixelFormat != VmbPixelFormatMono8 ))
- {
- //capture_err = VmbErrorInvalidValue;
- qDebug() << "Invalid pixel format";
- res = false;
- }
- else
- {
- VmbUint32_t nImageSize = 0;
- capture_err = pFrame->GetImageSize( nImageSize );
- if ( capture_err == VmbErrorSuccess )
- {
- VmbUint32_t nWidth = 0;
- capture_err = pFrame->GetWidth( nWidth );
- if ( capture_err == VmbErrorSuccess )
- {
- VmbUint32_t nHeight = 0;
- capture_err = pFrame->GetHeight( nHeight );
- if ( capture_err == VmbErrorSuccess )
- {
- VmbUchar_t *pImage = nullptr;
- capture_err = pFrame->GetImage( pImage );
- if ( capture_err == VmbErrorSuccess)
- {
- cv::Mat image_temp = cv::Mat(nHeight, nWidth, CV_8UC1, pImage );
- image_temp.copyTo(image);
- res = true;
- }
- }
- }
- }
-
- }
- } else {
- qDebug() << "Failed to get pixel format";
- res = false;
- }
- } else {
- qDebug() << "Failed to acquire frame or incomplete";
- res = false;
- }
- } else {
- if(capture_err == VmbErrorTimeout && trigger){
- qDebug() << "Waiting for trigger?";
- }
- qDebug() << "Failed to acquire image";
- res = false;
- }
- } else {
- qDebug() << "Camera is not connected or is initalising";
- std::cerr << "Camera is not connected or is initalising" << std::endl;
- res = false;
- }
-
- qDebug() << "Capture time: " << timer.nsecsElapsed() / 1e9;
-
- return res;
-}
-
-bool CameraVimba::getImage(cv::Mat &out) {
- frame_mutex.lock();
- if (image.cols == 0 || image.rows == 0){
- qDebug() << "Global image result buffer size is incorrect";
- qDebug() << "cols: " << image.cols << " rows: " << image.rows;
- frame_mutex.unlock(); // release the lock before the early return
- return false;
- }
- image.copyTo(out);
- frame_mutex.unlock();
- return true;
-}
-
-CameraVimba::~CameraVimba(void)
-{
- close();
- emit finished();
-}
-=======
-#include "cameravimba.h"
-
-CameraVimba::CameraVimba(QObject *parent) : QObject(parent), system(VimbaSystem::GetInstance())
-{
-}
-
-
-void CameraVimba::assignThread(QThread *thread)
-{
- moveToThread(thread);
- connect(this, SIGNAL(finished()), thread, SLOT(quit()));
- connect(this, SIGNAL(finished()), this, SLOT(deleteLater()));
- connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));
- thread->start();
-}
-
-void CameraVimba::close() {
-
- qDebug() << "Closing camera";
-
- stopCapture();
- camera->Close();
-
- connected = false;
-}
-
-bool CameraVimba::initCamera(std::string camera_serial,int binning, bool trigger, int fps)
-{
- this->camera_serial = camera_serial;
- this->binning = binning;
- this->trigger = trigger;
- this->fps = fps;
- connected = false;
-
- CameraPtrVector cameras;
- system.GetCameras(cameras);
-
- bool cameraFind = false;
- for (const CameraPtr &trial_camera : cameras){
- std::string serial;
- trial_camera->GetSerialNumber(serial);
-
- if(serial == camera_serial){
- cameraFind = true;
- camera = trial_camera;
- break;
- }
- }
-
- if (!cameraFind){
- std::cerr << "Failed to find camera with name: " << camera_serial << std::endl;
- return false;
- }
-
- // Print the model name of the camera.
- std::string model;
- camera->GetModel(model);
- qDebug() << "Using device " << model.c_str();
-
- auto error = camera->Open(VmbAccessModeFull);
-
- if (error == VmbErrorSuccess){
-
- bool success = changeFPS(fps);
- if(!success)
- qDebug() << "Failed to set FPS";
-
- // Currently binning is not supported
- //bool error = setBinning(binning);
- success = enableTrigger(trigger);
- if(!success)
- qDebug() << "Failed to set trigger mode";
-
- frame_observer = shared_ptr<FrameObserver>(new FrameObserver(camera));
- connected = true;
-
- } else {
- qDebug() << "Failed to open camera";
- connected = false;
- }
-
- return connected;
-}
-
-void CameraVimba::startCapture(void){
-
- if(capturing) return;
-
- auto res = camera->StartContinuousImageAcquisition(15, frame_observer);
-
- if(res != VmbErrorSuccess){
- qDebug() << "Couldn't start capture!" << res;
- }else{
- connect(frame_observer.get(), SIGNAL( frameReady(int) ), this, SLOT( onFrame(int) ) );
- capturing = true;
- }
-}
-
-void CameraVimba::stopCapture(void){
- if(!capturing) return;
-
- camera->StopContinuousImageAcquisition();
- disconnect(frame_observer.get(), SIGNAL( frameReady(int) ), this, SLOT( onFrame(int) ) );
- capturing = false;
-}
-
-void CameraVimba::onFrame(int status){
- if(!connected){
- return;
- }
-
- // Pick up frame
- FramePtr pFrame = frame_observer->getFrame();
- if( pFrame == nullptr){
- qDebug() << "frame pointer is NULL, late frame ready message";
- return;
- }
-
- // See if it is not corrupt
- if( status == VmbFrameStatusComplete ){
- VmbUchar_t *pBuffer;
- VmbErrorType err = pFrame.get()->GetImage(pBuffer);
- if( VmbErrorSuccess != err )
- {
- qDebug() << "Failed to get image pointer";
- return;
- }
-
- VmbPixelFormatType ePixelFormat = VmbPixelFormatMono8;
- err = pFrame->GetPixelFormat( ePixelFormat );
- if ( err != VmbErrorSuccess)
- {
- qDebug() << "Failed to get pixel format";
- return;
- }
-
- if (( ePixelFormat != VmbPixelFormatMono8 ))
- {
- qDebug() << "Invalid pixel format";
- return;
- }
-
- VmbUint32_t nImageSize = 0;
- err = pFrame->GetImageSize( nImageSize );
- if ( err != VmbErrorSuccess )
- {
- qDebug() << "Failed to get image size";
- return;
- }
-
- VmbUint32_t nWidth = 0;
- err = pFrame->GetWidth( nWidth );
- if ( err != VmbErrorSuccess ){
- qDebug() << "Failed to get image width";
- return;
- }
-
- VmbUint32_t nHeight = 0;
- err = pFrame->GetHeight( nHeight );
- if ( err != VmbErrorSuccess )
- {
- qDebug() << "Failed to get image height";
- return;
- }
-
- VmbUchar_t *pImage = nullptr;
- err = pFrame->GetImage( pImage );
- if ( err == VmbErrorSuccess)
- {
- frame_mutex.lock();
- cv::Mat image_temp = cv::Mat(static_cast<int>(nHeight), static_cast<int>(nWidth), CV_8UC1, pImage );
- image_temp.copyTo(image);
- frame_mutex.unlock();
-
- }
-
- camera->QueueFrame(pFrame);
- }
-
- emit captured();
-}
-
-VmbError_t CameraVimba::getStringFeature(std::string name, std::string &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- feature->GetValue(res);
- }
-
- return error;
-}
-
-VmbError_t CameraVimba::getIntFeature(std::string name, long long &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- VmbInt64_t out;
- feature->GetValue(out);
- res = static_cast<long long>(out);
- }
-
- return error;
-}
-
-VmbError_t CameraVimba::getBoolFeature(std::string name, bool &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- feature->GetValue(res);
- }
-
- return error;
-}
-
-VmbError_t CameraVimba::getDoubleFeature(std::string name, double &res){
-
- FeaturePtr feature;
- auto error = camera->GetFeatureByName(name.c_str(), feature);
- if(error == VmbErrorSuccess){
- feature->GetValue(res);
- }
-
- return error;
-}
-
-void CameraVimba::getImageSize(int &width, int &height, int &bitdepth)
-{
- //get image size
- long long h = 0, w = 0;
- auto err_h = getIntFeature("Height", h);
- auto err_w = getIntFeature("Width", w);
-
- std::string format;
- auto err_fmt = getStringFeature("PixelSize", format);
-
- if (err_h == VmbErrorSuccess && err_w == VmbErrorSuccess && err_fmt == VmbErrorSuccess){
- height = static_cast<int>(h);
- width = static_cast<int>(w);
- if(format == "Bpp8")
- bitdepth = 8;
- else if(format == "Bpp16")
- bitdepth = 16;
- } else {
- qDebug() << "Failed to get width / height";
- }
-}
-
-bool CameraVimba::changeFPS(int fps){
- bool res = true;
- bool capture_state = capturing;
-
- if(capture_state){
- stopCapture();
- }
- // unlimited frame rate if set to 0
- if (fps > 0){
- // Order is important!
- res &= enableFPS(true);
- if(!res)
- qDebug() << "Failed to enable FPS";
- res &= setFPS(fps);
- if(!res)
- qDebug() << "Failed to set FPS";
-
- this->fps = fps;
- } else {
- res &= enableFPS(false);
- this->fps = fps;
- }
- if(capture_state){
- startCapture();
- }
- return res;
-}
-
-bool CameraVimba::setFPS(int fps){
- FeaturePtr feature;
- qDebug() << "Setting fps: " << fps;
- camera->GetFeatureByName("AcquisitionFrameRate", feature);
- auto error = feature->SetValue(static_cast<double>(fps));
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::enableFPS(bool enable){
- if (enable){
- qDebug() << "Enabling fps";
- } else {
- qDebug() << "Disabling fps";
- }
-
- FeaturePtr feature;
- camera->GetFeatureByName("AcquisitionFrameRateEnable", feature);
- auto error = feature->SetValue(enable);
-
- bool state;
- feature->GetValue(state);
-
- return (error == VmbErrorSuccess) && (state == enable);
-}
-
-bool CameraVimba::enableTrigger(bool enable){
- FeaturePtr feature;
- VmbError_t error;
- std::string state;
- std::string requested_trigger_mode;
- std::string requested_line_selector;
- std::string requested_line_mode;
- std::string requested_trigger_source;
- std::string requested_trigger_selector;
- bool success = true;
-
- if(enable){
- qDebug() << "Enabling trigger";
- requested_trigger_mode = "On";
- requested_trigger_source = "Line0";
- requested_trigger_selector = "FrameStart";
- requested_line_selector = "Line0";
- requested_line_mode = "Input";
- } else {
- qDebug() << "Disabling trigger";
- requested_trigger_mode = "Off";
- requested_trigger_source = "Software";
- requested_trigger_selector = "FrameStart";
- requested_line_selector = "Line0";
- requested_line_mode = "Input";
- }
-
- if (enable){
- /*
- camera->GetFeatureByName("LineSelector", feature);
- error = feature->SetValue(requested_line_selector.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_line_selector);
-
- if (!success){
- qDebug() << "Failed to set line selector";
- return success;
- }
-
- camera->GetFeatureByName("LineMode", feature);
- error = feature->SetValue(requested_line_mode.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_line_mode);
-
- if (!success){
- qDebug() << "Failed to set line mode";
- return success;
- }
- */
- }
-
- camera->GetFeatureByName("TriggerSelector", feature);
- error = feature->SetValue(requested_trigger_selector.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_trigger_selector);
-
- if (!success){
- qDebug() << "Failed to set trigger selector";
- return success;
- }
-
- camera->GetFeatureByName("TriggerSource", feature);
- error = feature->SetValue(requested_trigger_source.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_trigger_source);
-
- if (!success){
- qDebug() << "Failed to set trigger source";
- return success;
- }
-
- camera->GetFeatureByName("TriggerMode", feature);
- error = feature->SetValue(requested_trigger_mode.c_str());
- feature->GetValue(state);
- success &= (error == VmbErrorSuccess) && (state == requested_trigger_mode);
-
- if (!success){
- qDebug() << "Failed to set trigger mode";
- qDebug() << state.c_str();
- return success;
- }
-
- return success;
-}
-
-bool CameraVimba::changeBinning(int binning){
- close();
- return (initCamera(this->camera_serial,binning,this->trigger,this->fps));
-}
-
-double CameraVimba::getFPS(void){
- FeaturePtr feature;
- double fps;
- auto success = getDoubleFeature("AcquisitionFrameRate", fps);
- if(success != VmbErrorSuccess){
- fps = -1;
- }
- return fps;
-}
-
-
-bool CameraVimba::setBinning(int binning)
-{
- FeaturePtr feature;
- bool success = true;
- camera->GetFeatureByName("BinningHorizontal", feature);
- // Camera doesn't support binning
- if(feature.get() == nullptr) return false;
-
- success &= (feature->SetValue(binning) == VmbErrorSuccess);
- camera->GetFeatureByName("BinningVertical", feature);
- success &= (feature->SetValue(binning) == VmbErrorSuccess);
-
- return success;
-}
-
-bool CameraVimba::setExposure(double exposure)
-{
- FeaturePtr feature;
- //int fps = static_cast(getFPS());
- camera.get()->GetFeatureByName("ExposureTime", feature);
- double exposure_scaled = exposure * 1000;
- qDebug() << "Setting exposure to: " << exposure_scaled;
- qDebug() << "Vimba exposure to: " << exposure_scaled;
- auto error = feature->SetValue(exposure_scaled); // microseconds
-
- if(error != VmbErrorSuccess){
- qDebug() << "Failed to set exposure";
- }else{
- /*
- if(fps < 30 && 1.0/(1000.0*exposure) < 1.0/30){
- changeFPS(30);
- }
- */
- }
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::enableAutoExposure(bool enable)
-{
- std::string exposure_mode = "Off";
- if(enable) exposure_mode = "Continuous";
-
- FeaturePtr feature;
-
- if (enable){
- qDebug() << "Enabling auto exposure";
- } else {
- qDebug() << "Disabling auto exposure";
- }
-
- camera->GetFeatureByName("ExposureAuto", feature);
- auto error = feature->SetValue(exposure_mode.c_str());
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::enableAutoGain(bool enable)
-{
-
- if (enable){
- qDebug() << "Enabling auto gain";
- } else {
- qDebug() << "Disabling auto gain";
- }
-
- std::string gain_mode = "Off";
- if(enable) gain_mode = "Continuous";
-
- FeaturePtr feature;
- camera->GetFeatureByName("GainAuto", feature);
- auto error = feature->SetValue(gain_mode.c_str());
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::setGain(int gain)
-{
-
- enableAutoGain(false);
-
- FeaturePtr feature;
- camera.get()->GetFeatureByName("Gain", feature);
-
- qDebug() << "Setting gain to: " << gain;
-
- double min_gain, max_gain;
- feature->GetRange(min_gain, max_gain);
-
- qDebug() << "Vimba gain to: " << max_gain*static_cast(gain)/100.0;
-
- auto error = feature->SetValue(max_gain*static_cast(gain)/100.0);
-
- return error == VmbErrorSuccess;
-}
-
-bool CameraVimba::capture(void)
-{
- bool res = false;
- QElapsedTimer timer;
- timer.start();
-
- if (connected){
- // get image from cameras
- AVT::VmbAPI::FramePtr pFrame;
- VmbErrorType capture_err;
- VmbFrameStatusType status = VmbFrameStatusIncomplete;
-
- //qDebug() << "Init: " << timer.nsecsElapsed() / 1e9;
-
- capture_err = camera->AcquireSingleImage( pFrame, 5000 );
- //timer.start();
-
- if ( capture_err == VmbErrorSuccess)
- {
- capture_err = pFrame->GetReceiveStatus( status );
- if ( capture_err == VmbErrorSuccess
- && status == VmbFrameStatusComplete)
- {
- VmbPixelFormatType ePixelFormat = VmbPixelFormatMono8;
- capture_err = pFrame->GetPixelFormat( ePixelFormat );
- if ( capture_err == VmbErrorSuccess)
- {
- if ( ( ePixelFormat != VmbPixelFormatMono8 ))
- {
- //capture_err = VmbErrorInvalidValue;
- qDebug() << "Invalid pixel format";
- res = false;
- }
- else
- {
- VmbUint32_t nImageSize = 0;
- capture_err = pFrame->GetImageSize( nImageSize );
- if ( capture_err == VmbErrorSuccess )
- {
- VmbUint32_t nWidth = 0;
- capture_err = pFrame->GetWidth( nWidth );
- if ( capture_err == VmbErrorSuccess )
- {
- VmbUint32_t nHeight = 0;
- capture_err = pFrame->GetHeight( nHeight );
- if ( capture_err == VmbErrorSuccess )
- {
- VmbUchar_t *pImage = nullptr;
- capture_err = pFrame->GetImage( pImage );
- if ( capture_err == VmbErrorSuccess)
- {
- cv::Mat image_temp = cv::Mat(nHeight, nWidth, CV_8UC1, pImage );
- image_temp.copyTo(image);
- res = true;
- }
- }
- }
- }
-
- }
- } else {
- qDebug() << "Failed to get pixel format";
- res = false;
- }
- } else {
- qDebug() << "Failed to acquire frame or incomplete";
- res = false;
- }
- } else {
- if(capture_err == VmbErrorTimeout && trigger){
- qDebug() << "Waiting for trigger?";
- }
- qDebug() << "Failed to acquire image";
- res = false;
- }
- } else {
- qDebug() << "Camera is not connected or is initalising";
- std::cerr << "Camera is not connected or is initalising" << std::endl;
- res = false;
- }
-
- qDebug() << "Capture time: " << timer.nsecsElapsed() / 1e9;
-
- return res;
-}
-
-bool CameraVimba::getImage(cv::Mat &out) {
- frame_mutex.lock();
- if (image.cols == 0 || image.rows == 0){
- qDebug() << "Global image result buffer size is incorrect";
- qDebug() << "cols: " << image.cols << " rows: " << image.rows;
- frame_mutex.unlock(); // release the lock before the early return
- return false;
- }
- image.copyTo(out);
- frame_mutex.unlock();
- return true;
-}
-
-CameraVimba::~CameraVimba(void)
-{
- close();
- emit finished();
-}
->>>>>>> dev
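
Read together with the header that follows, the intended driving pattern for this class appears to be: move it to a worker thread, initialise by camera serial, start continuous acquisition, and copy each frame out on the captured() signal. A usage sketch against the declared API; the serial number and settings are placeholders.

#include <QThread>

void startVimbaStream()
{
    CameraVimba *cam = new CameraVimba();
    cam->assignThread(new QThread()); // moves cam to the thread and starts it

    if (cam->initCamera("example-serial", 1 /*binning*/, false /*trigger*/, 30 /*fps*/)) {
        QObject::connect(cam, &CameraVimba::captured, [cam]() {
            cv::Mat frame;
            if (cam->getImage(frame)) {
                // frame now holds the latest mono8 image
            }
        });
        cam->startCapture();
    }
}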
diff --git a/src/camera/cameravimba.h.orig b/src/camera/cameravimba.h.orig
deleted file mode 100644
index a0a23b3ed..000000000
--- a/src/camera/cameravimba.h.orig
+++ /dev/null
@@ -1,297 +0,0 @@
-<<<<<<< HEAD
-#ifndef CAMERAVIMBA_H
-#define CAMERAVIMBA_H
-
-#include <QObject>
-#include <QThread>
-#include <QMutex>
-#include <QDebug>
-#include <queue>
-
-#include <opencv2/opencv.hpp>
-#include <VimbaCPP/Include/VimbaCPP.h>
-
-using namespace AVT::VmbAPI;
-
-// Frame observer: queues incoming frames and signals when one is ready
-class FrameObserver : public QObject, public IFrameObserver{
- Q_OBJECT
-public:
- explicit FrameObserver(CameraPtr pCamera) : IFrameObserver(pCamera){}
-
- // Frame callback notifies about incoming frames
- void FrameReceived(const FramePtr pFrame){
- VmbFrameStatusType eReceiveStatus;
- auto receive_error = pFrame->GetReceiveStatus( eReceiveStatus );
- int num_receivers = receivers(SIGNAL(frameReady(int)));
-
- if( num_receivers != 0 && receive_error == VmbErrorSuccess ){
- // Lock the frame queue
- frame_mutex.lock();
- frame_queue.push( pFrame );
- // Unlock frame queue
- frame_mutex.unlock();
- // Emit the frame received signal
- emit frameReady(eReceiveStatus);
- }else{
- m_pCamera->QueueFrame( pFrame );
- }
- }
-
- FramePtr getFrame(void){
- // Lock the frame queue
- frame_mutex.lock();
- // Pop frame from queue
- FramePtr res;
- if( !frame_queue.empty() )
- {
- res = frame_queue.front();
- frame_queue.pop();
- }
- // Unlock frame queue
- frame_mutex.unlock();
- return res;
- }
-
- void clearFrameQueue(){
- // Lock the frame queue
- frame_mutex.lock();
- // Clear the frame queue and release the memory
- std::queue<FramePtr> empty;
- std::swap( frame_queue, empty );
- // Unlock the frame queue
- frame_mutex.unlock();
- }
-
-signals:
- void frameReady(int status);
-
-private:
- std::queue<FramePtr> frame_queue;
- QMutex frame_mutex;
-};
-
-class CameraVimba : public QObject
-{
- Q_OBJECT
-public:
- explicit CameraVimba(QObject *parent = nullptr);
- bool isAvailable();
- void close();
- bool initCamera(std::string camera_serial, int binning, bool trigger, int fps);
- void assignThread(QThread *thread);
- void getImageSize(int &image_width, int &image_height, int &bit_depth);
- bool setFrame16();
- bool setFrame8();
- bool setMaximumResolution();
- bool setPTP(bool enable);
- bool setExposure(double exposure);
- bool enableAutoExposure(bool enable);
- bool enableAutoGain(bool enable);
- bool setGain(int gain);
- bool setPacketSize(int packetSize);
- bool setInterPacketDelay(int interPacketDelay);
- bool setBinning(int binning);
- bool enableTrigger(bool enable);
-
- bool changeFPS(int fps);
- bool changeBinning(int binning);
- bool changePacketSize(int packet_size);
- bool setFPS(int fps);
- bool enableFPS(bool enable);
-
- ~CameraVimba(void);
-
-signals:
- void captured();
- void finished();
-
-private:
- VimbaSystem &system;
- CameraPtr camera;
- cv::Mat image;
- cv::Mat channels[3];
- cv::Mat image_buffer;
- enum format {Y800, Y16};
- format image_format;
- bool connected = false;
- int binning = 1;
- bool trigger = false;
- int fps = 30;
- std::string camera_serial;
- int max_timeout = 2000;
- QMutex frame_mutex;
- bool capturing = false;
-
- shared_ptr<FrameObserver> frame_observer;
- FramePtrVector frame_buffer;
-
- VmbError_t getStringFeature(std::string feature, std::string &res);
- VmbError_t getBoolFeature(std::string feature, bool &res);
- VmbError_t getDoubleFeature(std::string feature, double &res);
- VmbError_t getIntFeature(std::string feature, long long &res);
- double getFPS();
-
-public slots:
- bool capture(void);
- bool getImage(cv::Mat &out);
-
- void startCapture();
- void stopCapture();
-private slots:
- void onFrame(int);
-};
-
-#endif // CAMERAVIMBA_H
-=======
-#ifndef CAMERAVIMBA_H
-#define CAMERAVIMBA_H
-
-#include <QObject>
-#include <QThread>
-#include <QMutex>
-#include <QDebug>
-#include <queue>
-
-#include <opencv2/opencv.hpp>
-#include <VimbaCPP/Include/VimbaCPP.h>
-
-using namespace AVT::VmbAPI;
-
-// Frame observer: queues incoming frames and signals when one is ready
-class FrameObserver : public QObject, public IFrameObserver{
- Q_OBJECT
-public:
- explicit FrameObserver(CameraPtr pCamera) : IFrameObserver(pCamera){}
-
- // Frame callback notifies about incoming frames
- void FrameReceived(const FramePtr pFrame){
- bool bQueueDirectly = true;
- VmbFrameStatusType eReceiveStatus;
- auto receive_error = pFrame->GetReceiveStatus( eReceiveStatus );
- int num_receivers = receivers(SIGNAL(frameReady(int)));
-
- if( num_receivers != 0 && receive_error == VmbErrorSuccess ){
- // Lock the frame queue
- frame_mutex.lock();
- frame_queue.push( pFrame );
- // Unlock frame queue
- frame_mutex.unlock();
- // Emit the frame received signal
- emit frameReady(eReceiveStatus);
- bQueueDirectly = false;
-
- }else{
- m_pCamera->QueueFrame( pFrame );
- }
- }
-
- FramePtr getFrame(void){
- // Lock the frame queue
- frame_mutex.lock();
- // Pop frame from queue
- FramePtr res;
- if( !frame_queue.empty() )
- {
- res = frame_queue.front();
- frame_queue.pop();
- }
- // Unlock frame queue
- frame_mutex.unlock();
- return res;
- }
-
- void clearFrameQueue(){
- // Lock the frame queue
- frame_mutex.lock();
- // Clear the frame queue and release the memory
- std::queue<FramePtr> empty;
- std::swap( frame_queue, empty );
- // Unlock the frame queue
- frame_mutex.unlock();
- }
-
-signals:
- void frameReady(int status);
-
-private:
- std::queue<FramePtr> frame_queue;
- QMutex frame_mutex;
-};
-
-class CameraVimba : public QObject
-{
- Q_OBJECT
-public:
- explicit CameraVimba(QObject *parent = nullptr);
- bool isAvailable();
- void close();
- bool initCamera(std::string camera_serial, int binning, bool trigger, int fps);
- void assignThread(QThread *thread);
- void getImageSize(int &image_width, int &image_height, int &bit_depth);
- bool setFrame16();
- bool setFrame8();
- bool setMaximumResolution();
- bool setPTP(bool enable);
- bool setExposure(double exposure);
- bool enableAutoExposure(bool enable);
- bool enableAutoGain(bool enable);
- bool setGain(int gain);
- bool setPacketSize(int packetSize);
- bool setInterPacketDelay(int interPacketDelay);
- bool setBinning(int binning);
- bool enableTrigger(bool enable);
-
- bool changeFPS(int fps);
- bool changeBinning(int binning);
- bool changePacketSize(int packet_size);
- bool setFPS(int fps);
- bool enableFPS(bool enable);
-
- ~CameraVimba(void);
-
-signals:
- void captured();
- void finished();
-
-private:
- VimbaSystem &system;
- CameraPtr camera;
- cv::Mat image;
- cv::Mat channels[3];
- cv::Mat image_buffer;
- enum format {Y800, Y16};
- format image_format;
- bool connected = false;
- int binning = 1;
- bool trigger = false;
- int fps = 30;
- std::string camera_serial;
- int max_timeout = 2000;
- QMutex frame_mutex;
- bool capturing = false;
-
- shared_ptr<FrameObserver> frame_observer;
- FramePtrVector frame_buffer;
-
- VmbError_t getStringFeature(std::string feature, std::string &res);
- VmbError_t getBoolFeature(std::string feature, bool &res);
- VmbError_t getDoubleFeature(std::string feature, double &res);
- VmbError_t getIntFeature(std::string feature, long long &res);
- double getFPS();
-
-public slots:
- bool capture(void);
- bool getImage(cv::Mat &out);
-
- void startCapture();
- void stopCapture();
-private slots:
- void onFrame(int);
-};
-
-#endif // CAMERAVIMBA_H
->>>>>>> dev
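The FrameObserver deleted above hands frames from Vimba's capture callback to the consumer through a mutex-guarded queue, and clearFrameQueue() empties it by swapping with a fresh queue so the held frame memory is actually released. A minimal, self-contained sketch of the same producer/consumer pattern — std::mutex and a std::shared_ptr alias stand in for the Qt and Vimba types, so this illustrates the idiom rather than the toolkit's actual classes:

#include <iostream>
#include <memory>
#include <mutex>
#include <queue>

// Stand-in for AVT::VmbAPI::FramePtr; any payload type would do.
using FramePtr = std::shared_ptr<int>;

class FrameQueue {
public:
    // Producer side: called from the camera callback thread.
    void push(FramePtr frame) {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.push(std::move(frame));
    }

    // Consumer side: returns an empty pointer when nothing is waiting,
    // mirroring getFrame() above.
    FramePtr pop() {
        std::lock_guard<std::mutex> lock(mutex_);
        FramePtr res;
        if (!queue_.empty()) {
            res = queue_.front();
            queue_.pop();
        }
        return res;
    }

    // Swap-with-empty releases the queue's storage, as clearFrameQueue() does.
    void clear() {
        std::lock_guard<std::mutex> lock(mutex_);
        std::queue<FramePtr> empty;
        std::swap(queue_, empty);
    }

private:
    std::queue<FramePtr> queue_;
    std::mutex mutex_;
};

int main() {
    FrameQueue q;
    q.push(std::make_shared<int>(42));
    if (FramePtr f = q.pop()) std::cout << "got frame " << *f << "\n";
    return 0;
}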
diff --git a/src/detection/boundingbox.h.orig b/src/detection/boundingbox.h.orig
deleted file mode 100644
index ddfec334e..000000000
--- a/src/detection/boundingbox.h.orig
+++ /dev/null
@@ -1,43 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-#ifndef BOUNDINGBOX_H
-#define BOUNDINGBOX_H
-
-#include <QRect>
-#include <QString>
-
-typedef struct{
- QRect rect = QRect(0,0,0,0);
- QString classname = "";
- int occluded = 0;
- bool truncated = false;
- int classid = 0;
- double confidence = 0;
-} BoundingBox;
-
-#endif // BOUNDINGBOX_H
-=======
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-#ifndef BOUNDINGBOX_H
-#define BOUNDINGBOX_H
-
-#include <QRect>
-#include <QString>
-
-typedef struct{
- QRect rect = QRect(0,0,0,0);
- QString classname = "";
- int occluded = 0;
- bool truncated = false;
- int classid = 0;
- double confidence = 0;
-} BoundingBox;
-
-#endif // BOUNDINGBOX_H
->>>>>>> dev
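BoundingBox is a plain value type with sensible defaults, so it can be filled field by field. One Qt detail matters when the detector code later in this patch converts pixel corners into the rect: QRect is corner-inclusive, i.e. right() == left() + width() - 1. A short sketch with made-up coordinates:

#include <QPoint>
#include <QRect>
#include <QString>
#include <iostream>

// Same layout as the struct above, reproduced so the sketch stands alone.
typedef struct {
    QRect rect = QRect(0, 0, 0, 0);
    QString classname = "";
    int occluded = 0;
    bool truncated = false;
    int classid = 0;
    double confidence = 0;
} BoundingBox;

int main() {
    BoundingBox box;
    box.rect = QRect(QPoint(10, 20), QPoint(109, 219));
    box.classname = "person";
    box.confidence = 0.87;

    // Corner-inclusive: prints 100x200, not 99x199.
    std::cout << box.rect.width() << "x" << box.rect.height() << "\n";
    return 0;
}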
diff --git a/src/detection/detectoropencv.cpp.orig b/src/detection/detectoropencv.cpp.orig
deleted file mode 100644
index 79c206b84..000000000
--- a/src/detection/detectoropencv.cpp.orig
+++ /dev/null
@@ -1,883 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-
-#include "detectoropencv.h"
-
-DetectorOpenCV::DetectorOpenCV(QObject *parent) : QObject(parent)
-{
-
-}
-
-void DetectorOpenCV::assignThread(QThread *thread){
- qDebug() << "Moving detector to thread";
- thread_ = thread;
- this->moveToThread(thread_);
- connect(this, SIGNAL(finished()), thread_, SLOT(quit()));
- //connect(this, SIGNAL(finished()), this, SLOT(deleteLater()));
- connect(thread_, SIGNAL(finished()), thread_, SLOT(deleteLater()));
- thread_->start();
- thread_->setPriority(QThread::LowestPriority);
-}
-
-void DetectorOpenCV::setImageSize(int width, int height){
- if(width > 0 && height > 0){
- input_width = width;
- input_height = height;
- }
-}
-
-void DetectorOpenCV::readNamesFile(std::string class_file){
-
- class_names.clear();
-
- // Load names of classes
- std::ifstream ifs(class_file.c_str());
- std::string line;
- int i=0;
- while (std::getline(ifs, line)){
- class_names.push_back(line);
- std::cout << "Added detection class: " << i++ << " " << class_names.back() << std::endl;
- }
-}
-
-void DetectorOpenCV::setChannels(int channels){
- input_channels = channels;
-}
-
-void DetectorOpenCV::setTarget(int target){
- preferable_target = target;
-
- if(preferable_target == cv::dnn::DNN_TARGET_OPENCL){
- // Check for GPU
- cv::ocl::Context context;
-
- if(!cv::ocl::haveOpenCL()){
- std::cout << "OpenCL is not available. Falling back to CPU" << std::endl;
- preferable_target = cv::dnn::DNN_TARGET_CPU;
- }
-
- // Attempt to use a GPU
- if(context.create(cv::ocl::Device::TYPE_GPU)){
- std::cout << "Found OpenCL capable GPU - we're going to use it!" << std::endl;
- cv::ocl::Device(context.device(1));
- }
- }
-#ifdef WITH_CUDA
- else if(preferable_target == cv::dnn::DNN_TARGET_CUDA || preferable_target == cv::dnn::DNN_TARGET_CUDA_FP16){
- try
- {
- // Check for GPU
- if (cv::cuda::getCudaEnabledDeviceCount() <= 0) {
- qDebug() << "CUDA is not available. Falling back to CPU";
- preferable_target = cv::dnn::DNN_TARGET_CPU;
- }else{
- qDebug() << "NVIDIA GPU detected.";
- }
- }
- catch( cv::Exception& e )
- {
- const char* err_msg = e.what();
- std::cout << "exception caught: " << err_msg << std::endl;
- qDebug() << "CUDA is not available. Falling back to CPU";
- preferable_target = cv::dnn::DNN_TARGET_CPU;
- }
- }
-#endif
-}
-
-void DetectorOpenCV::loadNetwork(std::string names_file, std::string cfg_file, std::string model_file){
- // Load the names
- readNamesFile(names_file);
-
- // Infer network type automatically
- net = cv::dnn::readNet(model_file, cfg_file);
-
- // Should default to DNN_BACKEND_OPENCV (otherwise Intel inference engine)
- if(preferable_target == cv::dnn::DNN_TARGET_CUDA || preferable_target == cv::dnn::DNN_TARGET_CUDA_FP16){
- net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
- qDebug() << "Set backend and target to CUDA";
- }else{
- net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
- }
-
- // Network target, e.g. CUDA, CPU, etc
- net.setPreferableTarget(preferable_target);
-
- getOutputClassNames();
-
- ready = true;
-}
-
-void DetectorOpenCV::getOutputClassNames()
-{
- output_names.clear();
-
- //Get the indices of the output layers, i.e. the layers with unconnected outputs
- std::vector<int> outLayers = net.getUnconnectedOutLayers();
-
- //get the names of all the layers in the network
- auto layersNames = net.getLayerNames();
-
- // Get the names of the output layers in names
- output_names.resize(outLayers.size());
-
- for (size_t i = 0; i < outLayers.size(); ++i){
- output_names.at(i) = layersNames.at(static_cast<size_t>(outLayers[i]) - 1);
- }
-
-}
-
-void DetectorOpenCV::postProcessTensorflow(cv::Mat& frame, const std::vector<cv::Mat>& outputs, std::vector<BoundingBox> &filtered_boxes){
- std::vector<int> classIds;
- std::vector<float> confidences;
- std::vector<cv::Rect> boxes;
-
- auto detections = outputs.at(0);
- const int numDetections = detections.size[2];
-
- std::cout << "Outputs_size: " << detections.total() << std::endl;
- std::cout << "Number of detections: " << numDetections << std::endl;
-
- // batch id, class id, confidence, bbox (x4)
- detections = detections.reshape(1, static_cast<int>(detections.total() / 7));
-
- // There are top-k (= 100 typical) detections, most of which should have
- // more or less zero confidence.
- for (int i = 0; i < numDetections; ++i)
- {
- float confidence = detections.at<float>(i, 2);
-
- if (confidence > confThreshold )
- {
-
- // Extract the bounding box
- int classId = static_cast<int>(detections.at<float>(i, 1));
- int left = static_cast<int>(frame.cols * detections.at<float>(i, 3));
- int top = static_cast<int>(frame.rows * detections.at<float>(i, 4));
- int right = static_cast<int>(frame.cols * detections.at<float>(i, 5));
- int bottom = static_cast<int>(frame.rows * detections.at<float>(i, 6));
-
- BoundingBox bbox;
- bbox.rect.setLeft(std::max(0, std::min(left, frame.cols - 1)));
- bbox.rect.setTop(std::max(0, std::min(top, frame.rows - 1)));
- bbox.rect.setRight(std::max(0, std::min(right, frame.cols - 1)));
- bbox.rect.setBottom(std::max(0, std::min(bottom, frame.rows - 1)));
- bbox.confidence = static_cast<double>(confidence);
- bbox.classid = classId;
- bbox.classname = QString::fromStdString(class_names.at(static_cast<size_t>(bbox.classid)));
-
- /*
- std::cout << "Found (" << bbox.classid << ") " << bbox.classname.toStdString()
- << " at" << " (" << bbox.rect.center().x() << ", " << bbox.rect.center().y()
- << "), conf: " << bbox.confidence
- << ", size (" << bbox.rect.width() << "x" << bbox.rect.height() << ")"
- << std::endl;
- */
-
- filtered_boxes.push_back(bbox);
- }else{
- //
- }
- }
-}
-
-void DetectorOpenCV::postProcess(cv::Mat& frame, const std::vector<cv::Mat>& outputs, std::vector<BoundingBox> &filtered_boxes)
-{
- std::vector<int> classIds;
- std::vector<float> confidences;
- std::vector<cv::Rect> boxes;
-
- // Debug: this should be three because there are three scales that Yolo searches over
- //std::cout << "Outputs: " << outputs.size() << std::endl;
-
- for (size_t i = 0; i < outputs.size(); ++i)
- {
- // Scan through all the bounding boxes output from the network and keep only the
- // ones with high confidence scores. Assign the box's class label as the class
- // with the highest score for the box.
- float* data = reinterpret_cast<float*>(outputs[i].data);
- for (int j = 0; j < outputs[i].rows; ++j, data += outputs[i].cols)
- {
- cv::Mat scores = outputs[i].row(j).colRange(5, outputs[i].cols);
- cv::Point classIdPoint;
- double confidence;
- // Get the value and location of the maximum score
- minMaxLoc(scores, nullptr, &confidence, nullptr, &classIdPoint);
-
- if (confidence > 0)
- {
-
- // Output is a percentage of the frame width/height
- // so it doesn't matter that we're transforming boxes
- // between the resized and full-size image.
- int centerX = static_cast<int>(data[0] * frame.cols);
- int centerY = static_cast<int>(data[1] * frame.rows);
- int width = static_cast<int>(data[2] * frame.cols);
- int height = static_cast<int>(data[3] * frame.rows);
- int left = centerX - width / 2;
- int top = centerY - height / 2;
-
- classIds.push_back(classIdPoint.x);
- confidences.push_back(static_cast<float>(confidence));
- boxes.push_back(cv::Rect(left, top, width, height));
-
- }else{
- if(confidence == 0.0)
- continue;
-
- std::cout << "Detected "
- << class_names.at(static_cast<size_t>(classIdPoint.x))
- << " with low confidence: " << confidence << std::endl;
-
- }
- }
- }
-
- std::vector<int> indices;
-
- // Perform non maximum suppression to eliminate redundant overlapping boxes with
- // lower confidences
-
- // We set the confidence threshold to zero here, we'll filter the boxes out later.
- // This lets us provide some feedback to the user if their threshold is too high.
- cv::dnn::NMSBoxes(boxes, confidences, static_cast<float>(confThreshold), static_cast<float>(nmsThreshold), indices);
-
- for (size_t i = 0; i < indices.size(); ++i)
- {
- auto idx = static_cast<size_t>(indices.at(i));
-
- BoundingBox box;
- cv::Rect rect = boxes.at(idx);
-
- box.confidence = static_cast<double>(confidences.at(idx));
- box.classid = classIds.at(idx);
- box.classname = QString::fromStdString(class_names.at(static_cast<size_t>(box.classid)));
-
- // Darknet predicts box centres and half-width/height, so the
- // box can go outside the image. Clamp to the image size:
- QPoint top_left = {std::max(0, rect.x), std::max(0, rect.y)};
- QPoint bottom_right = top_left + QPoint({rect.width, rect.height});
-
- bottom_right.setX(std::min(bottom_right.x(), frame.cols));
- bottom_right.setY(std::min(bottom_right.y(), frame.rows));
-
- box.rect.setBottomRight(bottom_right);
- box.rect.setTopLeft(top_left);
-
- /*
- std::cout << "Found " << box.classname.toStdString()
- << " at" << " (" << box.rect.center().x() << ", " << box.rect.center().y()
- << "), conf: " << box.confidence
- << ", size (" << box.rect.width() << "x" << box.rect.height() << ")"
- << std::endl;
- */
-
- filtered_boxes.push_back(box);
-
- }
-
- return;
-}
-
-std::vector<BoundingBox> DetectorOpenCV::infer(cv::Mat image){
-
- std::vector<BoundingBox> detections;
- running = true;
-
- // Assume we have an alpha image if 4 channels
- if(image.channels() == 4){
- cv::cvtColor(image, image, cv::COLOR_BGRA2BGR);
- }
-
- if(convert_depth && image.elemSize() == 2){
- double minval, maxval;
- cv::minMaxIdx(image, &minval, &maxval);
-
- double range = maxval-minval;
- double scale_factor = 255.0/range;
-
- image.convertTo(image, CV_32FC1);
- image -= minval;
- image *= scale_factor;
- image.convertTo(image, CV_8UC1);
- }
-
- if(convert_grayscale && image.channels() == 1){
- cv::cvtColor(image, image, cv::COLOR_GRAY2RGB);
- }
-
- if(image.channels() != input_channels){
- std::cout << "Input channel mismatch. Expecting "
- << input_channels
- << " channels but image has " << image.channels()
- << " channels.";
-
- return detections;
- }
-
- // Make sure we service the GUI thread as well, as
- // this thread will run in a very tight loop
- QCoreApplication::processEvents();
- if(framework == FRAMEWORK_TENSORFLOW){
- detections = inferTensorflow(image);
- }else if(framework == FRAMEWORK_DARKNET){
- detections = inferDarknet(image);
- }
- QCoreApplication::processEvents();
-
- running = false;
-
- return detections;
-
-}
-
-std::vector<BoundingBox> DetectorOpenCV::inferTensorflow(cv::Mat image){
-
- std::vector<BoundingBox> results;
-
- auto mean = cv::Scalar(0,0,0);
- if(image.channels() == 1){
- mean = cv::Scalar(0);
- }
-
- /*
- // Check for 16-bit
- double scale_factor = 1/255.0;
-
- if(image.elemSize() == 2){
- scale_factor = 1/65535.0;
- }
- */
-
- auto input_size = cv::Size(image.cols, image.rows);
-
- bool swap_rb = false; // BGR->RGB?
- bool crop = false; // Use the entire image
-
- // No normalising! The model will handle it.
- auto blob = cv::dnn::blobFromImage(image, 1.0, input_size, mean, swap_rb, crop);
-
- //Sets the input to the network
- net.setInput(blob);
-
- // Runs the forward pass to get output of the output layers
- std::vector<cv::Mat> outputs;
- net.forward(outputs, output_names);
-
- postProcessTensorflow(image, outputs, results);
-
- std::vector<double> layersTimes;
- double freq = cv::getTickFrequency() / 1000;
- processing_time = net.getPerfProfile(layersTimes) / freq;
-
- return results;
-}
-
-std::vector<BoundingBox> DetectorOpenCV::inferDarknet(cv::Mat image){
-
- std::vector<BoundingBox> results;
-
- auto mean = cv::Scalar(0,0,0);
- if(image.channels() == 1){
- mean = cv::Scalar(0);
- }
-
- // Check for 16-bit
- double scale_factor = 1/255.0;
- if(image.elemSize() == 2){
- scale_factor = 1/65535.0;
- }
-
- auto input_size = cv::Size(input_width, input_height);
-
- bool swap_rb = true; // BGR->RGB?
- bool crop = false; // Use the entire image
- auto blob = cv::dnn::blobFromImage(image, scale_factor, input_size, mean, swap_rb, crop);
-
- //Sets the input to the network
- net.setInput(blob);
-
- // Runs the forward pass to get output of the output layers
- std::vector<cv::Mat> outputs;
- net.forward(outputs, output_names);
-
- // Put efficiency information. The function getPerfProfile returns the
- // overall time for inference(t) and the timings for each of the layers(in layersTimes)
- std::vector<double> layersTimes;
- double freq = cv::getTickFrequency() / 1000;
- processing_time = net.getPerfProfile(layersTimes) / freq;
-
- // Remove the bounding boxes with low confidence
- postProcess(image, outputs, results);
-
- return results;
-}
-
-void DetectorOpenCV::annotateImage(cv::Mat &frame, std::vector<BoundingBox> boxes, cv::Scalar colour, cv::Scalar font_colour){
- for(auto &box : boxes){
- //Draw a rectangle displaying the bounding box
-
- auto top_left = cv::Point(box.rect.left(), box.rect.top());
-
- cv::rectangle(frame, top_left,
- cv::Point(box.rect.right(), box.rect.bottom()),
- colour);
-
- //Get the label for the class name and its confidence
- std::string label = cv::format("%.2f", box.confidence);
-
- //Display the label at the top of the bounding box
- int baseLine;
- cv::Size labelSize = getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
- cv::putText(frame, label, top_left, cv::FONT_HERSHEY_SIMPLEX, 0.5, font_colour);
- }
-}
-
-DetectorOpenCV::~DetectorOpenCV(void){
- emit finished();
-}
-=======
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-
-#include "detectoropencv.h"
-
-DetectorOpenCV::DetectorOpenCV(QObject *parent) : QObject(parent)
-{
-
-}
-
-void DetectorOpenCV::assignThread(QThread *thread){
- qDebug() << "Moving detector to thread";
- thread_ = thread;
- this->moveToThread(thread_);
- connect(this, SIGNAL(finished()), thread_, SLOT(quit()));
- //connect(this, SIGNAL(finished()), this, SLOT(deleteLater()));
- connect(thread_, SIGNAL(finished()), thread_, SLOT(deleteLater()));
- thread_->start();
- thread_->setPriority(QThread::LowestPriority);
-}
-
-void DetectorOpenCV::setImageSize(int width, int height){
- if(width > 0 && height > 0){
- input_width = width;
- input_height = height;
- }
-}
-
-void DetectorOpenCV::readNamesFile(std::string class_file){
-
- class_names.clear();
-
- // Load names of classes
- std::ifstream ifs(class_file.c_str());
- std::string line;
- int i=0;
- while (std::getline(ifs, line)){
- class_names.push_back(line);
- std::cout << "Added detection class: " << i++ << " " << class_names.back() << std::endl;
- }
-}
-
-void DetectorOpenCV::setChannels(int channels){
- input_channels = channels;
-}
-
-void DetectorOpenCV::setTarget(int target){
- preferable_target = target;
-
- if(preferable_target == cv::dnn::DNN_TARGET_OPENCL){
- // Check for GPU
- cv::ocl::Context context;
-
- if(!cv::ocl::haveOpenCL()){
- std::cout << "OpenCL is not available. Falling back to CPU" << std::endl;
- preferable_target = cv::dnn::DNN_TARGET_CPU;
- }
-
- // Attempt to use a GPU
- if(context.create(cv::ocl::Device::TYPE_GPU)){
- std::cout << "Found OpenCL capable GPU - we're going to use it!" << std::endl;
- cv::ocl::Device(context.device(1));
- }
- }
-#ifdef WITH_CUDA
- else if(preferable_target == cv::dnn::DNN_TARGET_CUDA || preferable_target == cv::dnn::DNN_TARGET_CUDA_FP16){
- try
- {
- // Check for GPU
- if (cv::cuda::getCudaEnabledDeviceCount() <= 0) {
- qDebug() << "CUDA is not available. Falling back to CPU";
- preferable_target = cv::dnn::DNN_TARGET_CPU;
- }else{
- qDebug() << "NVIDIA GPU detected.";
- }
- }
- catch( cv::Exception& e )
- {
- const char* err_msg = e.what();
- std::cout << "exception caught: " << err_msg << std::endl;
- qDebug() << "CUDA is not available. Falling back to CPU";
- preferable_target = cv::dnn::DNN_TARGET_CPU;
- }
- }
-#endif
-}
-
-void DetectorOpenCV::loadNetwork(std::string names_file, std::string cfg_file, std::string model_file){
- // Load the names
- readNamesFile(names_file);
-
- // Infer network type automatically
- net = cv::dnn::readNet(model_file, cfg_file);
-
- // Should default to DNN_BACKEND_OPENCV (otherwise Intel inference engine)
- if(preferable_target == cv::dnn::DNN_TARGET_CUDA || preferable_target == cv::dnn::DNN_TARGET_CUDA_FP16){
- net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
- qDebug() << "Set backend and target to CUDA";
- }else{
- net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
- }
-
- // Network target, e.g. CUDA, CPU, etc
- net.setPreferableTarget(preferable_target);
-
- getOutputClassNames();
-
- ready = true;
-}
-
-void DetectorOpenCV::getOutputClassNames()
-{
- output_names.clear();
-
- //Get the indices of the output layers, i.e. the layers with unconnected outputs
- std::vector<int> outLayers = net.getUnconnectedOutLayers();
-
- //get the names of all the layers in the network
- auto layersNames = net.getLayerNames();
-
- // Get the names of the output layers in names
- output_names.resize(outLayers.size());
-
- for (size_t i = 0; i < outLayers.size(); ++i){
- output_names.at(i) = layersNames.at(static_cast<size_t>(outLayers[i]) - 1);
- }
-
-}
-
-void DetectorOpenCV::postProcessTensorflow(cv::Mat& frame, const std::vector<cv::Mat>& outputs, std::vector<BoundingBox> &filtered_boxes){
- std::vector<int> classIds;
- std::vector<float> confidences;
- std::vector<cv::Rect> boxes;
-
- auto detections = outputs.at(0);
- const int numDetections = detections.size[2];
-
- std::cout << "Outputs_size: " << detections.total() << std::endl;
- std::cout << "Number of detections: " << numDetections << std::endl;
-
- // batch id, class id, confidence, bbox (x4)
- detections = detections.reshape(1, static_cast<int>(detections.total() / 7));
-
- // There are top-k (= 100 typical) detections, most of which should have
- // more or less zero confidence.
- for (int i = 0; i < numDetections; ++i)
- {
- float confidence = detections.at<float>(i, 2);
-
- if (confidence > confThreshold )
- {
-
- // Extract the bounding box
- int classId = static_cast<int>(detections.at<float>(i, 1));
- int left = static_cast<int>(frame.cols * detections.at<float>(i, 3));
- int top = static_cast<int>(frame.rows * detections.at<float>(i, 4));
- int right = static_cast<int>(frame.cols * detections.at<float>(i, 5));
- int bottom = static_cast<int>(frame.rows * detections.at<float>(i, 6));
-
- BoundingBox bbox;
- bbox.rect.setLeft(std::max(0, std::min(left, frame.cols - 1)));
- bbox.rect.setTop(std::max(0, std::min(top, frame.rows - 1)));
- bbox.rect.setRight(std::max(0, std::min(right, frame.cols - 1)));
- bbox.rect.setBottom(std::max(0, std::min(bottom, frame.rows - 1)));
- bbox.confidence = static_cast<double>(confidence);
- bbox.classid = classId;
- bbox.classname = QString::fromStdString(class_names.at(static_cast<size_t>(bbox.classid)));
-
- /*
- std::cout << "Found (" << bbox.classid << ") " << bbox.classname.toStdString()
- << " at" << " (" << bbox.rect.center().x() << ", " << bbox.rect.center().y()
- << "), conf: " << bbox.confidence
- << ", size (" << bbox.rect.width() << "x" << bbox.rect.height() << ")"
- << std::endl;
- */
-
- filtered_boxes.push_back(bbox);
- }else{
- //
- }
- }
-}
-
-void DetectorOpenCV::postProcess(cv::Mat& frame, const std::vector<cv::Mat>& outputs, std::vector<BoundingBox> &filtered_boxes)
-{
- std::vector<int> classIds;
- std::vector<float> confidences;
- std::vector<cv::Rect> boxes;
-
- // Debug: this should be three because there are three scales that Yolo searches over
- //std::cout << "Outputs: " << outputs.size() << std::endl;
-
- for (size_t i = 0; i < outputs.size(); ++i)
- {
- // Scan through all the bounding boxes output from the network and keep only the
- // ones with high confidence scores. Assign the box's class label as the class
- // with the highest score for the box.
- float* data = reinterpret_cast<float*>(outputs[i].data);
- for (int j = 0; j < outputs[i].rows; ++j, data += outputs[i].cols)
- {
- cv::Mat scores = outputs[i].row(j).colRange(5, outputs[i].cols);
- cv::Point classIdPoint;
- double confidence;
- // Get the value and location of the maximum score
- minMaxLoc(scores, nullptr, &confidence, nullptr, &classIdPoint);
-
- if (confidence > 0)
- {
-
- // Output is a percentage of the frame width/height
- // so it doesn't matter that we're transforming boxes
- // between the resized and full-size image.
- int centerX = static_cast<int>(data[0] * frame.cols);
- int centerY = static_cast<int>(data[1] * frame.rows);
- int width = static_cast<int>(data[2] * frame.cols);
- int height = static_cast<int>(data[3] * frame.rows);
- int left = centerX - width / 2;
- int top = centerY - height / 2;
-
- classIds.push_back(classIdPoint.x);
- confidences.push_back(static_cast<float>(confidence));
- boxes.push_back(cv::Rect(left, top, width, height));
-
- }else{
- if(confidence == 0.0)
- continue;
-
- std::cout << "Detected "
- << class_names.at(static_cast<size_t>(classIdPoint.x))
- << " with low confidence: " << confidence << std::endl;
-
- }
- }
- }
-
- std::vector<int> indices;
-
- // Perform non maximum suppression to eliminate redundant overlapping boxes with
- // lower confidences
-
- // We set the confidence threshold to zero here, we'll filter the boxes out later.
- // This lets us provide some feedback to the user if their threshold is too high.
- cv::dnn::NMSBoxes(boxes, confidences, static_cast<float>(confThreshold), static_cast<float>(nmsThreshold), indices);
-
- for (size_t i = 0; i < indices.size(); ++i)
- {
- auto idx = static_cast<size_t>(indices.at(i));
-
- BoundingBox box;
- cv::Rect rect = boxes.at(idx);
-
- box.confidence = static_cast<double>(confidences.at(idx));
- box.classid = classIds.at(idx);
- box.classname = QString::fromStdString(class_names.at(static_cast<size_t>(box.classid)));
-
- // Darknet predicts box centres and half-width/height, so the
- // box can go outside the image. Clamp to the image size:
- QPoint top_left = {std::max(0, rect.x), std::max(0, rect.y)};
- QPoint bottom_right = top_left + QPoint({rect.width, rect.height});
-
- bottom_right.setX(std::min(bottom_right.x(), frame.cols));
- bottom_right.setY(std::min(bottom_right.y(), frame.rows));
-
- box.rect.setBottomRight(bottom_right);
- box.rect.setTopLeft(top_left);
-
- /*
- std::cout << "Found " << box.classname.toStdString()
- << " at" << " (" << box.rect.center().x() << ", " << box.rect.center().y()
- << "), conf: " << box.confidence
- << ", size (" << box.rect.width() << "x" << box.rect.height() << ")"
- << std::endl;
- */
-
- filtered_boxes.push_back(box);
-
- }
-
- return;
-}
-
-std::vector<BoundingBox> DetectorOpenCV::infer(cv::Mat image){
-
- std::vector<BoundingBox> detections;
- running = true;
-
- // Assume we have an alpha image if 4 channels
- if(image.channels() == 4){
- cv::cvtColor(image, image, cv::COLOR_BGRA2BGR);
- }
-
- if(convert_depth && image.elemSize() == 2){
- double minval, maxval;
- cv::minMaxIdx(image, &minval, &maxval);
-
- double range = maxval-minval;
- double scale_factor = 255.0/range;
-
- image.convertTo(image, CV_32FC1);
- image -= minval;
- image *= scale_factor;
- image.convertTo(image, CV_8UC1);
- }
-
- if(convert_grayscale && image.channels() == 1){
- cv::cvtColor(image, image, cv::COLOR_GRAY2RGB);
- }
-
- if(image.channels() != input_channels){
- std::cout << "Input channel mismatch. Expecting "
- << input_channels
- << " channels but image has " << image.channels()
- << " channels.";
-
- return detections;
- }
-
- // Make sure we service the GUI thread as well, as
- // this thread will run in a very tight loop
- QCoreApplication::processEvents();
- if(framework == FRAMEWORK_TENSORFLOW){
- detections = inferTensorflow(image);
- }else if(framework == FRAMEWORK_DARKNET){
- detections = inferDarknet(image);
- }
- QCoreApplication::processEvents();
-
- running = false;
-
- return detections;
-
-}
-
-std::vector<BoundingBox> DetectorOpenCV::inferTensorflow(cv::Mat image){
-
- std::vector<BoundingBox> results;
-
- auto mean = cv::Scalar(0,0,0);
- if(image.channels() == 1){
- mean = cv::Scalar(0);
- }
-
- /*
- // Check for 16-bit
- double scale_factor = 1/255.0;
-
- if(image.elemSize() == 2){
- scale_factor = 1/65535.0;
- }
- */
-
- auto input_size = cv::Size(image.cols, image.rows);
-
- bool swap_rb = false; // BGR->RGB?
- bool crop = false; // Use the entire image
-
- // No normalising! The model will handle it.
- auto blob = cv::dnn::blobFromImage(image, 1.0, input_size, mean, swap_rb, crop);
-
- //Sets the input to the network
- net.setInput(blob);
-
- // Runs the forward pass to get output of the output layers
- std::vector<cv::Mat> outputs;
- net.forward(outputs, output_names);
-
- postProcessTensorflow(image, outputs, results);
-
- std::vector<double> layersTimes;
- double freq = cv::getTickFrequency() / 1000;
- processing_time = net.getPerfProfile(layersTimes) / freq;
-
- return results;
-}
-
-std::vector<BoundingBox> DetectorOpenCV::inferDarknet(cv::Mat image){
-
- std::vector<BoundingBox> results;
-
- auto mean = cv::Scalar(0,0,0);
- if(image.channels() == 1){
- mean = cv::Scalar(0);
- }
-
- // Check for 16-bit
- double scale_factor = 1/255.0;
- if(image.elemSize() == 2){
- scale_factor = 1/65535.0;
- }
-
- auto input_size = cv::Size(input_width, input_height);
-
- bool swap_rb = true; // BGR->RGB?
- bool crop = false; // Use the entire image
- auto blob = cv::dnn::blobFromImage(image, scale_factor, input_size, mean, swap_rb, crop);
-
- //Sets the input to the network
- net.setInput(blob);
-
- // Runs the forward pass to get output of the output layers
- std::vector<cv::Mat> outputs;
- net.forward(outputs, output_names);
-
- // Put efficiency information. The function getPerfProfile returns the
- // overall time for inference(t) and the timings for each of the layers(in layersTimes)
- std::vector<double> layersTimes;
- double freq = cv::getTickFrequency() / 1000;
- processing_time = net.getPerfProfile(layersTimes) / freq;
-
- // Remove the bounding boxes with low confidence
- postProcess(image, outputs, results);
-
- return results;
-}
-
-void DetectorOpenCV::annotateImage(cv::Mat &frame, std::vector<BoundingBox> boxes, cv::Scalar colour, cv::Scalar font_colour){
- for(auto &box : boxes){
- //Draw a rectangle displaying the bounding box
-
- auto top_left = cv::Point(box.rect.left(), box.rect.top());
-
- cv::rectangle(frame, top_left,
- cv::Point(box.rect.right(), box.rect.bottom()),
- colour);
-
- //Get the label for the class name and its confidence
- std::string label = cv::format("%.2f", box.confidence);
-
- //Display the label at the top of the bounding box
- int baseLine;
- cv::Size labelSize = getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
- cv::putText(frame, label, top_left, cv::FONT_HERSHEY_SIMPLEX, 0.5, font_colour);
- }
-}
-
-DetectorOpenCV::~DetectorOpenCV(void){
- emit finished();
-}
->>>>>>> dev
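postProcess() above decodes YOLO rows — normalised centre x/y and width/height followed by per-class scores — into pixel rectangles, then hands them to cv::dnn::NMSBoxes so overlapping duplicates are suppressed. A condensed, runnable sketch of that decode-then-suppress flow, using three hand-written rows in place of real network output (single class, score in the last column):

#include <opencv2/opencv.hpp>

#include <iostream>
#include <vector>

int main() {
    const int frame_w = 640, frame_h = 480;

    // Synthetic YOLO-style rows: cx, cy, w, h as fractions of the frame,
    // then a confidence score.
    std::vector<std::vector<float>> rows = {
        {0.50f, 0.50f, 0.20f, 0.40f, 0.90f},
        {0.51f, 0.50f, 0.21f, 0.39f, 0.80f}, // near-duplicate of the first
        {0.10f, 0.10f, 0.05f, 0.05f, 0.70f},
    };

    std::vector<cv::Rect> boxes;
    std::vector<float> confidences;
    for (const auto &r : rows) {
        int width  = static_cast<int>(r[2] * frame_w);
        int height = static_cast<int>(r[3] * frame_h);
        int left   = static_cast<int>(r[0] * frame_w) - width / 2;
        int top    = static_cast<int>(r[1] * frame_h) - height / 2;
        boxes.emplace_back(left, top, width, height);
        confidences.push_back(r[4]);
    }

    // Non-maximum suppression keeps the 0.90 box and its distant neighbour,
    // and drops the overlapping 0.80 twin.
    std::vector<int> indices;
    cv::dnn::NMSBoxes(boxes, confidences, 0.5f, 0.4f, indices);

    for (int idx : indices)
        std::cout << boxes[static_cast<size_t>(idx)] << " conf "
                  << confidences[static_cast<size_t>(idx)] << "\n";
    return 0;
}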
diff --git a/src/detection/detectoropencv.h.orig b/src/detection/detectoropencv.h.orig
deleted file mode 100644
index 0651b6fb7..000000000
--- a/src/detection/detectoropencv.h.orig
+++ /dev/null
@@ -1,221 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-#ifndef DETECTOROPENCV_H
-#define DETECTOROPENCV_H
-
-#include <QObject>
-#include <QThread>
-#include <QDebug>
-#include <QCoreApplication>
-
-#include <opencv2/opencv.hpp>
-#include <opencv2/dnn.hpp>
-#include <opencv2/core/ocl.hpp>
-
-#include <boundingbox.h>
-
-#include <fstream>
-#include <iostream>
-#include <vector>
-#include <string>
-
-typedef enum {
- FRAMEWORK_TENSORFLOW,
- FRAMEWORK_DARKNET,
- FRAMEWORK_ONNX,
- FRAMEWORK_PYTORCH
-} model_framework;
-
-class DetectorOpenCV : public QObject {
-
-Q_OBJECT
-
-public:
- DetectorOpenCV(QObject *parent = nullptr);
-
- void setImageSize(int width, int height);
-
- void loadNetwork(std::string names_file, std::string cfg_file, std::string model_file);
-
- void annotateImage(cv::Mat &image,
- std::vector<BoundingBox> boxes,
- cv::Scalar colour = cv::Scalar(0,0,255),
- cv::Scalar font_colour = cv::Scalar(255,255,255));
-
- std::vector<BoundingBox> inferDarknet(cv::Mat image);
- std::vector<BoundingBox> inferTensorflow(cv::Mat image);
-
- double getConfidenceThreshold(void){ return confThreshold;}
- double getNMSThreshold(void){ return nmsThreshold;}
-
- void assignThread(QThread* thread);
- int getChannels(void){return input_channels;}
- int getInputWidth(void){return input_width;}
- int getInputHeight(void){return input_height;}
- bool isReady(void){return ready;}
- double getProcessingTime(void){return processing_time;}
- int getNumClasses(void){return static_cast<int>(class_names.size());}
- std::vector<std::string> getClassNames(void){return class_names;}
- bool isRunning(void){return running;}
- ~DetectorOpenCV();
-
-public slots:
- std::vector<BoundingBox> infer(cv::Mat image);
- void setFramework(model_framework framework){this->framework = framework;}
-
- void setConfidenceThresholdPercent(int thresh_pc){confThreshold = std::max(0.0, thresh_pc / 100.);}
- void setConfidenceThreshold(double thresh){confThreshold = std::max(0.0, thresh);}
-
- void setNMSThresholdPercent(int thresh){nmsThreshold = std::max(0.0, thresh / 100.0);}
- void setNMSThreshold(double thresh){nmsThreshold = std::max(0.0, thresh);}
-
- void setConvertGrayscale(bool convert){convert_grayscale = convert;}
- void setConvertDepth(bool convert){convert_depth = convert;}
- void setTarget(int target);
- void setChannels(int channels);
-
-private:
-
- void postProcess(cv::Mat& frame, const std::vector<cv::Mat>& outs, std::vector<BoundingBox> &filtered_outputs);
- void readNamesFile(std::string class_file = "coco.names");
- void getOutputClassNames(void);
-
- bool convert_grayscale = true;
- bool convert_depth = true;
- double processing_time;
- double confThreshold = 0.5; // Confidence threshold
- double nmsThreshold = 0.4; // Non-maximum suppression threshold
- int input_width = 416; // Width of network's input image
- int input_height = 416; // Height of network's input image
- int input_channels = 3;
- int preferable_target = cv::dnn::DNN_TARGET_OPENCL;
- model_framework framework = FRAMEWORK_DARKNET;
- bool ready = false;
- bool running = false;
- QThread* thread_;
-
- std::vector<std::string> class_names;
- std::vector<std::string> output_names;
- cv::dnn::Net net;
- void postProcessTensorflow(cv::Mat &frame, const std::vector<cv::Mat> &outputs, std::vector<BoundingBox> &filtered_boxes);
-
-signals:
- void finished();
- void objectsDetected();
-};
-
-#endif // DETECTOROPENCV_H
-=======
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-#ifndef DETECTOROPENCV_H
-#define DETECTOROPENCV_H
-
-#include <QObject>
-#include <QThread>
-#include <QDebug>
-#include <QCoreApplication>
-
-#include <opencv2/opencv.hpp>
-#include <opencv2/dnn.hpp>
-#include <opencv2/core/ocl.hpp>
-
-#include <boundingbox.h>
-
-#include <fstream>
-#include <iostream>
-#include <vector>
-#include <string>
-
-typedef enum {
- FRAMEWORK_TENSORFLOW,
- FRAMEWORK_DARKNET,
- FRAMEWORK_ONNX,
- FRAMEWORK_PYTORCH
-} model_framework;
-
-class DetectorOpenCV : public QObject {
-
-Q_OBJECT
-
-public:
- DetectorOpenCV(QObject *parent = nullptr);
-
- void setImageSize(int width, int height);
-
- void loadNetwork(std::string names_file, std::string cfg_file, std::string model_file);
-
- void annotateImage(cv::Mat &image,
- std::vector<BoundingBox> boxes,
- cv::Scalar colour = cv::Scalar(0,0,255),
- cv::Scalar font_colour = cv::Scalar(255,255,255));
-
- std::vector<BoundingBox> inferDarknet(cv::Mat image);
- std::vector<BoundingBox> inferTensorflow(cv::Mat image);
-
- double getConfidenceThreshold(void){ return confThreshold;}
- double getNMSThreshold(void){ return nmsThreshold;}
-
- void assignThread(QThread* thread);
- int getChannels(void){return input_channels;}
- int getInputWidth(void){return input_width;}
- int getInputHeight(void){return input_height;}
- bool isReady(void){return ready;}
- double getProcessingTime(void){return processing_time;}
- int getNumClasses(void){return static_cast<int>(class_names.size());}
- std::vector<std::string> getClassNames(void){return class_names;}
- bool isRunning(void){return running;}
- ~DetectorOpenCV();
-
-public slots:
- std::vector<BoundingBox> infer(cv::Mat image);
- void setFramework(model_framework framework){this->framework = framework;}
-
- void setConfidenceThresholdPercent(int thresh_pc){confThreshold = std::max(0.0, thresh_pc / 100.);}
- void setConfidenceThreshold(double thresh){confThreshold = std::max(0.0, thresh);}
-
- void setNMSThresholdPercent(int thresh){nmsThreshold = std::max(0.0, thresh / 100.0);}
- void setNMSThreshold(double thresh){nmsThreshold = std::max(0.0, thresh);}
-
- void setConvertGrayscale(bool convert){convert_grayscale = convert;}
- void setConvertDepth(bool convert){convert_depth = convert;}
- void setTarget(int target);
- void setChannels(int channels);
-
-private:
-
- void postProcess(cv::Mat& frame, const std::vector<cv::Mat>& outs, std::vector<BoundingBox> &filtered_outputs);
- void readNamesFile(std::string class_file = "coco.names");
- void getOutputClassNames(void);
-
- bool convert_grayscale = true;
- bool convert_depth = true;
- double processing_time;
- double confThreshold = 0.5; // Confidence threshold
- double nmsThreshold = 0.4; // Non-maximum suppression threshold
- int input_width = 416; // Width of network's input image
- int input_height = 416; // Height of network's input image
- int input_channels = 3;
- int preferable_target = cv::dnn::DNN_TARGET_OPENCL;
- model_framework framework = FRAMEWORK_DARKNET;
- bool ready = false;
- bool running = false;
- QThread* thread_;
-
- std::vector<std::string> class_names;
- std::vector<std::string> output_names;
- cv::dnn::Net net;
- void postProcessTensorflow(cv::Mat &frame, const std::vector<cv::Mat> &outputs, std::vector<BoundingBox> &filtered_boxes);
-
-signals:
- void finished();
- void objectsDetected();
-};
-
-#endif // DETECTOROPENCV_H
->>>>>>> dev
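Put together, the interface above is used load-then-infer. A hedged call-site sketch — the image and model paths here are placeholders (they follow the defaults that appear in the .ui file later in this patch), not files shipped with the header:

#include "detectoropencv.h"

#include <QCoreApplication>
#include <opencv2/opencv.hpp>
#include <iostream>

int main(int argc, char *argv[]) {
    // infer() calls QCoreApplication::processEvents(), so give it an app.
    QCoreApplication app(argc, argv);

    DetectorOpenCV detector;
    detector.setFramework(FRAMEWORK_DARKNET);
    detector.setTarget(cv::dnn::DNN_TARGET_CPU);
    detector.setConfidenceThreshold(0.5);
    detector.setNMSThreshold(0.4);

    detector.loadNetwork("./ml/coco/coco.names",
                         "./ml/coco/yolov4-tiny.cfg",
                         "./ml/coco/yolov4-tiny.weights");

    cv::Mat image = cv::imread("test.png");
    if (image.empty() || !detector.isReady()) return 1;

    std::vector<BoundingBox> boxes = detector.infer(image);
    detector.annotateImage(image, boxes);

    std::cout << boxes.size() << " detections in "
              << detector.getProcessingTime() << " ms" << std::endl;
    return 0;
}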
diff --git a/src/detection/detectorsetupdialog.cpp.orig b/src/detection/detectorsetupdialog.cpp.orig
deleted file mode 100644
index 442f03a08..000000000
--- a/src/detection/detectorsetupdialog.cpp.orig
+++ /dev/null
@@ -1,635 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-
-#include "detectorsetupdialog.h"
-#include "ui_detectorsetupdialog.h"
-
-DetectorSetupDialog::DetectorSetupDialog(QWidget *parent) :
- QDialog(parent),
- ui(new Ui::DetectorSetupDialog)
-{
- ui->setupUi(this);
-
- connect(ui->cfgPathButton, SIGNAL(clicked()), this, SLOT(setCfgFile()));
- connect(ui->cfgPathLineEdit, SIGNAL(editingFinished()), this, SLOT(checkForm()));
- connect(ui->weightPathButton, SIGNAL(clicked(bool)), this, SLOT(setWeightsFile()));
- connect(ui->weightPathLineEdit, SIGNAL(editingFinished()), this, SLOT(checkForm()));
- connect(ui->namesPathButton, SIGNAL(clicked(bool)), this, SLOT(setNamesFile()));
- connect(ui->frameworkComboBox, SIGNAL(currentIndexChanged(QString)), this, SLOT(setFramework()));
- connect(ui->targetComboBox, SIGNAL(currentIndexChanged(QString)), this, SLOT(setTarget()));
- connect(ui->namesPathLineEdit, SIGNAL(editingFinished()), this, SLOT(checkForm()));
- connect(ui->convertGrayscaleCheckbox, SIGNAL(clicked(bool)), this, SLOT(setConvertGrayscale()));
- connect(ui->convertDepthCheckbox, SIGNAL(clicked(bool)), this, SLOT(setConvertDepth()));
-
- settings = new QSettings("I3Dr", "Stereo Vision Toolkit");
-
- cfg_file = settings->value("model_cfg", "").toString();
- weight_file = settings->value("model_weights", "").toString();
- names_file = settings->value("model_names", "").toString();
-
- convert_grayscale = settings->value("model_convert_grayscale", true).toBool();
- ui->convertGrayscaleCheckbox->setChecked(convert_grayscale);
-
- convert_depth = settings->value("model_convert_depth", true).toBool();
- ui->convertDepthCheckbox->setChecked(convert_depth);
-
- image_width = settings->value("model_width", 0).toInt();
- image_height = settings->value("model_height", 0).toInt();
- image_channels = settings->value("model_channels", 3).toInt();
-
- framework = static_cast<model_framework>(settings->value("model_framework", 0).toInt());
- if(framework == FRAMEWORK_DARKNET){
- ui->frameworkComboBox->setCurrentText("Darknet (YOLO)");
- ui->imageHeightLabel->show();
- ui->imageWidthLabel->show();
- }else if(framework == FRAMEWORK_TENSORFLOW){
- ui->frameworkComboBox->setCurrentText("Tensorflow");
- ui->imageHeightLabel->hide();
- ui->imageWidthLabel->hide();
- }
-
- target = settings->value("model_target", cv::dnn::DNN_TARGET_CPU).toInt();
- if(target == cv::dnn::DNN_TARGET_CPU){
- ui->targetComboBox->setCurrentText("CPU");
- }else if(target == cv::dnn::DNN_TARGET_OPENCL){
- ui->targetComboBox->setCurrentText("OpenCL");
- }else if(target == cv::dnn::DNN_TARGET_OPENCL_FP16){
- ui->targetComboBox->setCurrentText("OpenCL FP16");
- }
-#ifdef WITH_CUDA
- else if(target == cv::dnn::DNN_TARGET_CUDA){
- ui->targetComboBox->setCurrentText("CUDA");
- }else if(target == cv::dnn::DNN_TARGET_CUDA_FP16){
- ui->targetComboBox->setCurrentText("CUDA FP16");
- }
-#endif
- updateFields();
- checkForm();
-
-}
-
-void DetectorSetupDialog::updateFields(){
- ui->cfgPathLineEdit->setText(cfg_file);
- ui->weightPathLineEdit->setText(weight_file);
- ui->namesPathLineEdit->setText(names_file);
-
- ui->imageWidthLabel->setText(QString::number(image_width));
- ui->imageHeightLabel->setText(QString::number(image_height));
- ui->imageChannelsLabel->setText(QString::number(image_channels));
-}
-
-void DetectorSetupDialog::setConvertGrayscale(void){
- convert_grayscale = ui->convertGrayscaleCheckbox->isChecked();
- settings->setValue("model_convert_grayscale", convert_grayscale);
-}
-
-bool DetectorSetupDialog::getConvertGrayscale(void){
- return convert_grayscale;
-}
-
-void DetectorSetupDialog::setConvertDepth(void){
- convert_depth = ui->convertDepthCheckbox->isChecked();
- settings->setValue("model_convert_depth", convert_depth);
-}
-
-bool DetectorSetupDialog::getConvertDepth(void){
- return convert_depth;
-}
-
-void DetectorSetupDialog::checkForm(void){
-
- ui->buttonBox->button(QDialogButtonBox::Ok)->setDisabled(true);
-
- cfg_file = ui->cfgPathLineEdit->text();
- weight_file = ui->weightPathLineEdit->text();
- names_file = ui->namesPathLineEdit->text();
-
- if(cfg_file == "") return;
- if(weight_file == "") return;
- if(names_file == "") return;
-
- if(!QFile(cfg_file).exists()){
- qDebug() << "Config file doesn't exist";
- return;
- }else if(!getParamsFromConfig()){
- return;
- }
-
- if(!QFile(weight_file).exists()){
- qDebug() << "Weight file doesn't exist";
- return;
- }
-
- if(!QFile(names_file).exists()){
- qDebug() << "Names file doesn't exist";
- return;
- }
-
- // At this point, all good.
- ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(true);
- settings->setValue("model_width", image_width);
- settings->setValue("model_height", image_height);
- settings->setValue("modelchannels", image_channels);
-
- settings->setValue("model_cfg", cfg_file);
- settings->setValue("model_weights", weight_file);
- settings->setValue("model_names", names_file);
-
-}
-
-bool DetectorSetupDialog::getParamsFromConfig(void){
-
- qDebug() << "Checking config file";
-
- if(framework == FRAMEWORK_DARKNET){
- QSettings darknet_settings(cfg_file, QSettings::IniFormat);
-
- darknet_settings.beginGroup("net");
-
- auto keys = darknet_settings.childKeys();
-
- if(!darknet_settings.contains("width")){
- qDebug() << "No width parameter";
- return false;
- }
- if(!darknet_settings.contains("height")){
- qDebug() << "No height parameter";
- return false;
- }
- if(!darknet_settings.contains("channels")){
- qDebug() << "No channels parameter";
- return false;
- }
-
- auto width = darknet_settings.value("width").toInt();
- auto height = darknet_settings.value("height").toInt();
- auto channels = darknet_settings.value("channels").toInt();
-
- darknet_settings.endGroup();
-
- qDebug() << width << height << channels;
-
- if(width > 0 && height > 0 && channels > 0){
-
- qDebug() << width << height << channels;
-
- image_width = width;
- image_height = height;
- image_channels = channels;
-
- }else{
- return false;
- }
-
- }else if(framework == FRAMEWORK_TENSORFLOW){
- // In theory we can parse the .pbtxt file to figure out
- // the input layer parameters, but that either means bringing in
- // protobuf or loading the entire network via OpenCV.
-
- }
-
- updateFields();
-
- return true;
-
-}
-
-QString DetectorSetupDialog::openFile(QString title, QString search_path, QString filter){
- QString openDir;
-
- if(search_path == ""){
- openDir = settings->value("project_folder").toString();
- }else{
- openDir = QFileInfo(search_path).absoluteDir().absolutePath();
- }
-
- auto path = QFileDialog::getOpenFileName(this, title,
- openDir,
- QString("All files (*.*);;%1").arg(filter),
- &filter);
-
- return path;
-}
-
-void DetectorSetupDialog::setCfgFile(void){
-
- QString filter, title;
-
- if(framework == FRAMEWORK_DARKNET){
- filter = tr("Config (*.cfg)");
- title = tr("Select darknet config file");
- }else if(framework == FRAMEWORK_TENSORFLOW){
- filter = tr("Config (*.pbtxt)");
- title = tr("Select tensorflow config file");
- }else{
- return;
- }
-
- auto path = openFile(title, cfg_file, filter);
-
- if(path != ""){
- ui->cfgPathLineEdit->setText(path);
- }
-
- checkForm();
-}
-
-
-void DetectorSetupDialog::setWeightsFile(void){
-
- QString filter, title;
-
- if(framework == FRAMEWORK_DARKNET){
- filter = tr("Weights (*.weights)");
- title = tr("Select darknet weights file");
- }else if(framework == FRAMEWORK_TENSORFLOW){
- filter = tr("Config (*.pb)");
- title = tr("Select tensorflow frozen graph");
- }else{
- return;
- }
-
- auto path = openFile(title, weight_file, filter);
-
- if(path != ""){
- ui->weightPathLineEdit->setText(path);
- }
-
- checkForm();
-}
-
-void DetectorSetupDialog::setFramework(void){
- if(ui->frameworkComboBox->currentText().startsWith("Darknet")){
- framework = FRAMEWORK_DARKNET;
- settings->setValue("model_framework", framework);
- }else if(ui->frameworkComboBox->currentText().startsWith("Tensorflow")){
- framework = FRAMEWORK_TENSORFLOW;
- settings->setValue("model_framework", framework);
- }
-}
-
-void DetectorSetupDialog::setTarget(void){
- if(ui->targetComboBox->currentText() == "CPU"){
- target = cv::dnn::DNN_TARGET_CPU;
- settings->setValue("model_target", target);
- }else if(ui->targetComboBox->currentText() == "OpenCL"){
- target = cv::dnn::DNN_TARGET_OPENCL;
- settings->setValue("model_target", target);
- }else if(ui->targetComboBox->currentText() == "OpenCL FP16"){
- target = cv::dnn::DNN_TARGET_OPENCL_FP16;
- settings->setValue("model_target", target);
- }
-#ifdef WITH_CUDA
- else if(ui->targetComboBox->currentText() == "CUDA"){
- target = cv::dnn::DNN_TARGET_CUDA;
- settings->setValue("model_target", target);
- }else if(ui->targetComboBox->currentText() == "CUDA FP16"){
- target = cv::dnn::DNN_TARGET_CUDA_FP16;
- settings->setValue("model_target", target);
- }
-#endif
-}
-
-int DetectorSetupDialog::getTarget(void){
- return target;
-}
-
-void DetectorSetupDialog::setNamesFile(void){
-
- QString filter = tr("Names (*.names)");
- QString title = tr("Select darknet names file");
-
- auto path = openFile(title, names_file, filter);
-
- if(path != ""){
- ui->namesPathLineEdit->setText(path);
- }
-
- checkForm();
-}
-
-DetectorSetupDialog::~DetectorSetupDialog()
-{
- delete ui;
-}
-=======
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-
-#include "detectorsetupdialog.h"
-#include "ui_detectorsetupdialog.h"
-
-DetectorSetupDialog::DetectorSetupDialog(QWidget *parent) :
- QDialog(parent),
- ui(new Ui::DetectorSetupDialog)
-{
- ui->setupUi(this);
-
- connect(ui->cfgPathButton, SIGNAL(clicked()), this, SLOT(setCfgFile()));
- connect(ui->cfgPathLineEdit, SIGNAL(editingFinished()), this, SLOT(checkForm()));
- connect(ui->weightPathButton, SIGNAL(clicked(bool)), this, SLOT(setWeightsFile()));
- connect(ui->weightPathLineEdit, SIGNAL(editingFinished()), this, SLOT(checkForm()));
- connect(ui->namesPathButton, SIGNAL(clicked(bool)), this, SLOT(setNamesFile()));
- connect(ui->frameworkComboBox, SIGNAL(currentIndexChanged(QString)), this, SLOT(setFramework()));
- connect(ui->targetComboBox, SIGNAL(currentIndexChanged(QString)), this, SLOT(setTarget()));
- connect(ui->namesPathLineEdit, SIGNAL(editingFinished()), this, SLOT(checkForm()));
- connect(ui->convertGrayscaleCheckbox, SIGNAL(clicked(bool)), this, SLOT(setConvertGrayscale()));
- connect(ui->convertDepthCheckbox, SIGNAL(clicked(bool)), this, SLOT(setConvertDepth()));
-
- settings = new QSettings("I3Dr", "Stereo Vision Toolkit");
-
- cfg_file = settings->value("model_cfg", "").toString();
- weight_file = settings->value("model_weights", "").toString();
- names_file = settings->value("model_names", "").toString();
-
- convert_grayscale = settings->value("model_convert_grayscale", true).toBool();
- ui->convertGrayscaleCheckbox->setChecked(convert_grayscale);
-
- convert_depth = settings->value("model_convert_depth", true).toBool();
- ui->convertDepthCheckbox->setChecked(convert_depth);
-
- image_width = settings->value("model_width", 0).toInt();
- image_height = settings->value("model_height", 0).toInt();
- image_channels = settings->value("model_channels", 3).toInt();
-
- framework = static_cast<model_framework>(settings->value("model_framework", 0).toInt());
- if(framework == FRAMEWORK_DARKNET){
- ui->frameworkComboBox->setCurrentText("Darknet (YOLO)");
- ui->imageHeightLabel->show();
- ui->imageWidthLabel->show();
- }else if(framework == FRAMEWORK_TENSORFLOW){
- ui->frameworkComboBox->setCurrentText("Tensorflow");
- ui->imageHeightLabel->hide();
- ui->imageWidthLabel->hide();
- }
-
- target = settings->value("model_target", cv::dnn::DNN_TARGET_CPU).toInt();
- if(target == cv::dnn::DNN_TARGET_CPU){
- ui->targetComboBox->setCurrentText("CPU");
- }else if(target == cv::dnn::DNN_TARGET_OPENCL){
- ui->targetComboBox->setCurrentText("OpenCL");
- }else if(target == cv::dnn::DNN_TARGET_OPENCL_FP16){
- ui->targetComboBox->setCurrentText("OpenCL FP16");
- }
-#ifdef WITH_CUDA
- else if(target == cv::dnn::DNN_TARGET_CUDA){
- ui->targetComboBox->setCurrentText("CUDA");
- }else if(target == cv::dnn::DNN_TARGET_CUDA_FP16){
- ui->targetComboBox->setCurrentText("CUDA FP16");
- }
-#endif
- updateFields();
- checkForm();
-
-}
-
-void DetectorSetupDialog::updateFields(){
- ui->cfgPathLineEdit->setText(cfg_file);
- ui->weightPathLineEdit->setText(weight_file);
- ui->namesPathLineEdit->setText(names_file);
-
- ui->imageWidthLabel->setText(QString::number(image_width));
- ui->imageHeightLabel->setText(QString::number(image_height));
- ui->imageChannelsLabel->setText(QString::number(image_channels));
-}
-
-void DetectorSetupDialog::setConvertGrayscale(void){
- convert_grayscale = ui->convertGrayscaleCheckbox->isChecked();
- settings->setValue("model_convert_grayscale", convert_grayscale);
-}
-
-bool DetectorSetupDialog::getConvertGrayscale(void){
- return convert_grayscale;
-}
-
-void DetectorSetupDialog::setConvertDepth(void){
- convert_depth = ui->convertDepthCheckbox->isChecked();
- settings->setValue("model_convert_depth", convert_depth);
-}
-
-bool DetectorSetupDialog::getConvertDepth(void){
- return convert_depth;
-}
-
-void DetectorSetupDialog::checkForm(void){
-
- ui->buttonBox->button(QDialogButtonBox::Ok)->setDisabled(true);
-
- cfg_file = ui->cfgPathLineEdit->text();
- weight_file = ui->weightPathLineEdit->text();
- names_file = ui->namesPathLineEdit->text();
-
- if(cfg_file == "") return;
- if(weight_file == "") return;
- if(names_file == "") return;
-
- if(!QFile(cfg_file).exists()){
- qDebug() << "Config file doesn't exist";
- return;
- }else if(!getParamsFromConfig()){
- return;
- }
-
- if(!QFile(weight_file).exists()){
- qDebug() << "Weight file doesn't exist";
- return;
- }
-
- if(!QFile(names_file).exists()){
- qDebug() << "Names file doesn't exist";
- return;
- }
-
- // At this point, all good.
- ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(true);
- settings->setValue("model_width", image_width);
- settings->setValue("model_height", image_height);
- settings->setValue("modelchannels", image_channels);
-
- settings->setValue("model_cfg", cfg_file);
- settings->setValue("model_weights", weight_file);
- settings->setValue("model_names", names_file);
-
-}
-
-bool DetectorSetupDialog::getParamsFromConfig(void){
-
- qDebug() << "Checking config file";
-
- if(framework == FRAMEWORK_DARKNET){
- QSettings darknet_settings(cfg_file, QSettings::IniFormat);
-
- darknet_settings.beginGroup("net");
-
- auto keys = darknet_settings.childKeys();
-
- if(!darknet_settings.contains("width")){
- qDebug() << "No width parameter";
- return false;
- }
- if(!darknet_settings.contains("height")){
- qDebug() << "No height parameter";
- return false;
- }
- if(!darknet_settings.contains("channels")){
- qDebug() << "No channels parameter";
- return false;
- }
-
- auto width = darknet_settings.value("width").toInt();
- auto height = darknet_settings.value("height").toInt();
- auto channels = darknet_settings.value("channels").toInt();
-
- darknet_settings.endGroup();
-
- qDebug() << width << height << channels;
-
- if(width > 0 && height > 0 && channels > 0){
-
- qDebug() << width << height << channels;
-
- image_width = width;
- image_height = height;
- image_channels = channels;
-
- }else{
- return false;
- }
-
- }else if(framework == FRAMEWORK_TENSORFLOW){
- // In theory we can parse the .pbtxt file to figure out
- // the input layer parameters, but that either means bringing in
- // protobuf or loading the entire network via OpenCV.
-
- }
-
- updateFields();
-
- return true;
-
-}
-
-QString DetectorSetupDialog::openFile(QString title, QString search_path, QString filter){
- QString openDir;
-
- if(search_path == ""){
- openDir = settings->value("project_folder").toString();
- }else{
- openDir = QFileInfo(search_path).absoluteDir().absolutePath();
- }
-
- auto path = QFileDialog::getOpenFileName(this, title,
- openDir,
- QString("All files (*.*);;%1").arg(filter),
- &filter);
-
- return path;
-}
-
-void DetectorSetupDialog::setCfgFile(void){
-
- QString filter, title;
-
- if(framework == FRAMEWORK_DARKNET){
- filter = tr("Config (*.cfg)");
- title = tr("Select darknet config file");
- }else if(framework == FRAMEWORK_TENSORFLOW){
- filter = tr("Config (*.pbtxt)");
- title = tr("Select tensorflow config file");
- }else{
- return;
- }
-
- auto path = openFile(title, cfg_file, filter);
-
- if(path != ""){
- ui->cfgPathLineEdit->setText(path);
- }
-
- checkForm();
-}
-
-
-void DetectorSetupDialog::setWeightsFile(void){
-
- QString filter, title;
-
- if(framework == FRAMEWORK_DARKNET){
- filter = tr("Weights (*.weights)");
- title = tr("Select darknet weights file");
- }else if(framework == FRAMEWORK_TENSORFLOW){
- filter = tr("Config (*.pb)");
- title = tr("Select tensorflow frozen graph");
- }else{
- return;
- }
-
- auto path = openFile(title, weight_file, filter);
-
- if(path != ""){
- ui->weightPathLineEdit->setText(path);
- }
-
- checkForm();
-}
-
-void DetectorSetupDialog::setFramework(void){
- if(ui->frameworkComboBox->currentText().startsWith("Darknet")){
- framework = FRAMEWORK_DARKNET;
- settings->setValue("model_framework", framework);
- }else if(ui->frameworkComboBox->currentText().startsWith("Tensorflow")){
- framework = FRAMEWORK_TENSORFLOW;
- settings->setValue("model_framework", framework);
- }
-}
-
-void DetectorSetupDialog::setTarget(void){
- if(ui->targetComboBox->currentText() == "CPU"){
- target = cv::dnn::DNN_TARGET_CPU;
- settings->setValue("model_target", target);
- }else if(ui->targetComboBox->currentText() == "OpenCL"){
- target = cv::dnn::DNN_TARGET_OPENCL;
- settings->setValue("model_target", target);
- }else if(ui->targetComboBox->currentText() == "OpenCL FP16"){
- target = cv::dnn::DNN_TARGET_OPENCL_FP16;
- settings->setValue("model_target", target);
- }
-#ifdef WITH_CUDA
- else if(ui->targetComboBox->currentText() == "CUDA"){
- target = cv::dnn::DNN_TARGET_CUDA;
- settings->setValue("model_target", target);
- }else if(ui->targetComboBox->currentText() == "CUDA FP16"){
- target = cv::dnn::DNN_TARGET_CUDA_FP16;
- settings->setValue("model_target", target);
- }
-#endif
-}
-
-int DetectorSetupDialog::getTarget(void){
- return target;
-}
-
-void DetectorSetupDialog::setNamesFile(void){
-
- QString filter = tr("Names (*.names)");
- QString title = tr("Select darknet names file");
-
- auto path = openFile(title, names_file, filter);
-
- if(path != ""){
- ui->namesPathLineEdit->setText(path);
- }
-
- checkForm();
-}
-
-DetectorSetupDialog::~DetectorSetupDialog()
-{
- delete ui;
-}
->>>>>>> dev
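checkForm() above only persists correctly across sessions because every settings->setValue() key matches the key the constructor reads back — which is why the "model_channels" write matters; a mismatched key would silently fall back to the default on the next launch. A minimal sketch of that QSettings round trip under the same organisation/application scope; "example_channels" is a throwaway key invented for this illustration:

#include <QDebug>
#include <QSettings>

int main() {
    // Same scope the dialog uses, so values land in the same store.
    QSettings settings("I3Dr", "Stereo Vision Toolkit");

    // Read with a default, as the dialog's constructor does...
    int channels = settings.value("example_channels", 3).toInt();
    qDebug() << "loaded:" << channels;

    // ...and write back under the *same* key, as checkForm() must.
    settings.setValue("example_channels", 1);
    qDebug() << "stored:" << settings.value("example_channels").toInt();
    return 0;
}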
diff --git a/src/detection/detectorsetupdialog.h.orig b/src/detection/detectorsetupdialog.h.orig
deleted file mode 100644
index bb12b67af..000000000
--- a/src/detection/detectorsetupdialog.h.orig
+++ /dev/null
@@ -1,151 +0,0 @@
-<<<<<<< HEAD
-/*
-* Copyright Deeplabel, used with permission of the author
-* Author: Josh Veitch-Michaelis (jveitch@i3drobotics.com)
-*/
-
-#ifndef DETECTORSETUPDIALOG_H
-#define DETECTORSETUPDIALOG_H
-
-#include <QDialog>
-#include <QSettings>
-#include <QFile>
-#include <QFileDialog>
-#include <QPushButton>
-#include <QDebug>
-
-#include <opencv2/dnn.hpp>
-#include <detectoropencv.h>
-
-namespace Ui {
-class DetectorSetupDialog;
-}
-
-class DetectorSetupDialog : public QDialog
-{
- Q_OBJECT
-
-public:
- explicit DetectorSetupDialog(QWidget *parent = nullptr);
- int getWidth(void){return image_width;}
- int getHeight(void){return image_height;}
- int getChannels(void){return image_channels;}
- QString getNames(void){return names_file;}
- QString getWeights(void){return weight_file;}
- QString getCfg(void){return cfg_file;}
- bool getConvertGrayscale(void);
- bool getConvertDepth(void);
- model_framework getFramework(void){return framework;}
- int getTarget();
- ~DetectorSetupDialog();
-
-private slots:
-
- void setCfgFile();
- void setNamesFile();
- void setWeightsFile();
- void setFramework();
- void setTarget();
- void setConvertGrayscale(void);
- void setConvertDepth(void);
-
- void checkForm();
-
-
-private:
- Ui::DetectorSetupDialog *ui;
- bool getParamsFromConfig();
- void updateFields();
-
- QSettings* settings;
-
- QString cfg_file;
- QString weight_file;
- QString names_file;
- int image_width = 320;
- int image_height = 240;
- int image_channels = 3; // default
- int target = 0;
- bool convert_grayscale = true;
- bool convert_depth = true;
- model_framework framework = FRAMEWORK_TENSORFLOW;
- QString openFile(QString title, QString search_path="", QString filter="");
-};
-
-#endif // DETECTORSETUPDIALOG_H
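The openFile() helper declared in this header is not shown in the diff. A plausible implementation, assuming it simply wraps QFileDialog, might look like the following; this is an illustrative sketch, not the toolkit's verbatim code.

    #include <QFileDialog>
    #include <QFileInfo>

    QString DetectorSetupDialog::openFile(QString title, QString search_path,
                                          QString filter)
    {
        // Start browsing from the folder of the previously selected file, if any.
        QString start_dir;
        if (!search_path.isEmpty())
            start_dir = QFileInfo(search_path).absolutePath();

        // Returns an empty string if the user cancels, which the callers in
        // detectorsetupdialog.cpp already handle via the path != "" check.
        return QFileDialog::getOpenFileName(this, title, start_dir, filter);
    }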
diff --git a/src/detection/detectorsetupdialog.ui.orig b/src/detection/detectorsetupdialog.ui.orig
deleted file mode 100644
index 4566fb5e2..000000000
--- a/src/detection/detectorsetupdialog.ui.orig
+++ /dev/null
@@ -1,545 +0,0 @@
-[Qt Designer XML for DetectorSetupDialog; the markup was lost in extraction, and both
-conflict sides (HEAD and dev) were identical. Recoverable structure:
- - Dialog "Configure object detector", geometry 0 0 500 314, minimum width 500.
- - Framework combo box: "Darknet (YOLO)", "Tensorflow".
- - Target combo box: "CPU", "OpenCL", "OpenCL FP16", "CUDA", "CUDA FP16".
- - File pickers with "..." browse buttons and defaults "./ml/coco/yolov4-tiny.cfg"
-   (Config file), "./ml/coco/yolov4-tiny.weights" (Weight file), and
-   "./ml/coco/coco.names" (Names file).
- - "Loaded Network Parameters:" fields: Type, Image Width, Image Height, Channels.
- - Check boxes, both checked by default: "Convert grayscale images to RGB" and
-   "Convert 16-bit images to 8-bit".
- - Horizontal QDialogButtonBox (Cancel|Ok).
- - Tab order: cfgPathLineEdit, cfgPathButton, weightPathLineEdit, weightPathButton,
-   namesPathLineEdit, namesPathButton.
- - Connections: buttonBox accepted() -> DetectorSetupDialog accept();
-   buttonBox rejected() -> DetectorSetupDialog reject().]
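The accepted()/rejected() connections recorded in the .ui file are equivalent to the following runtime calls, shown for illustration only; wireButtonBox() is a hypothetical helper, and in practice uic generates this wiring from the .ui connections section.

    #include <QDialog>
    #include <QDialogButtonBox>

    void wireButtonBox(QDialogButtonBox *buttonBox, QDialog *dialog)
    {
        // Clicking Ok fires accepted() and closes the dialog with accept();
        // clicking Cancel fires rejected() and closes it with reject().
        QObject::connect(buttonBox, &QDialogButtonBox::accepted,
                         dialog, &QDialog::accept);
        QObject::connect(buttonBox, &QDialogButtonBox::rejected,
                         dialog, &QDialog::reject);
    }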