-
Notifications
You must be signed in to change notification settings - Fork 6
/
Convolutional.cpp
61 lines (45 loc) · 2.55 KB
/
Convolutional.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#include <torch/script.h> // One-stop header.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "Convolutional.h"
using namespace std;
void Convolutional::load_model(char *modelpath) {
//torch::jit::script::Module module = torch::jit::load("../model.pt");
//this->module = torch::jit::load("../model.pt");
this->module = torch::jit::load(modelpath);//../model_40_epochs.pt");
}
//Check here: https://github.com/pytorch/pytorch/issues/12506
//https://gist.github.com/zeryx/526dbc05479e166ca7d512a670e6b82d
//https://discuss.pytorch.org/t/libtorch-c-convert-a-tensor-to-cv-mat-single-channel/47701/6
//permute: https://stackoverflow.com/questions/51143206/difference-between-tensor-permute-and-tensor-view-in-pytorch
//efficient push_back: https://en.cppreference.com/w/cpp/utility/move
// Run the network on one grayscale eye image plus a 2-element head-pose
// vector, writing the network's 2-element prediction into `gaze`.
// (Commented-out code elsewhere converts gaze with 180/M_PI, so the outputs
// are presumably angles in radians -- TODO confirm against the training code.)
//
// Parameters:
//   imgCV - single-channel eye image; the local header copy is converted to
//           float32 in [0,1] to match the model's input normalization.
//   poseF - pointer to 2 floats holding the head pose (second model input).
//   gaze  - output buffer: 2 floats receiving the prediction.
void Convolutional::predict(cv::Mat imgCV, float *poseF, float *gaze) {
    // Normalize to float32 in [0,1]. The tensor below must therefore be
    // kFloat32 (it was kU8 before this 1/255 normalization was introduced).
    imgCV.convertTo(imgCV, CV_32F, 1.0 / 255, 0);

    // from_blob wraps the Mat's memory without copying, which requires the
    // pixel rows to be contiguous -- force that if OpenCV handed us a view.
    if (!imgCV.isContinuous()) {
        imgCV = imgCV.clone();
    }

    // NCHW layout: batch=1, channels=1. `imgCV` stays alive for the whole
    // forward() call, so the zero-copy view remains valid.
    torch::Tensor img = torch::from_blob(
        imgCV.data, {1, 1, imgCV.rows, imgCV.cols}, torch::kFloat32);

    // Wrap the caller's pose buffer directly instead of allocating a
    // torch::rand tensor and overwriting it element by element; `poseF`
    // outlives forward(), so this no-copy view is safe.
    torch::Tensor pose = torch::from_blob(poseF, {1, 2}, torch::kFloat32);

    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(img);
    inputs.push_back(pose);

    torch::Tensor output = (this->module).forward(inputs).toTensor();
    gaze[0] = output[0][0].item<float>();
    gaze[1] = output[0][1].item<float>();
}