Skip to content

Commit

Permalink
dataloader & transformer update
Browse files Browse the repository at this point in the history
  • Loading branch information
yester31 committed Aug 16, 2021
1 parent ad5d2e0 commit e0ae71b
Show file tree
Hide file tree
Showing 14 changed files with 430 additions and 244 deletions.
75 changes: 34 additions & 41 deletions DL_LAYER/Activation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,33 +7,27 @@

enum act_func{Linear, Sigmoid, Tanh, ReLU, Swish, LeakRelu};

// Logistic sigmoid activation: 1 / (1 + e^-x); maps any real x into (0, 1).
float act_Sigmoid(float x) {
	return (1.f / (exp(-x) + 1.f));
}

// Hyperbolic-tangent activation; maps any real x into (-1, 1).
float act_Tanh(float x) {
	return tanh(x);
}

// Rectified linear unit: x for x >= 0, otherwise 0.
float act_ReLU(float x) {
	return (x >= 0 ? x : 0);
}

template <typename T>
T act_Swish(T x) {
float act_Swish(float x) {
return (x * act_Sigmoid(x));
}

// Leaky ReLU activation: x for x >= 0, otherwise a * x (default slope 0.01).
// NOTE(review): name is misspelled ("Reaky" should be "Leaky") but is kept
// unchanged so existing callers keep compiling.
float act_ReakyRelu(float x, float a = 0.01f) {
	return (x >= 0 ? x : a * x);
}

template <typename T>
const T ActivationMode(const T x, act_func Mode, const T a)
const float ActivationMode(const float x, act_func Mode, const float a)
{
switch (Mode)
{
Expand All @@ -57,8 +51,7 @@ const T ActivationMode(const T x, act_func Mode, const T a)
}
}

template <typename T>
void activation(vector<T>& Output, vector<T>& Input, act_func mode = Linear, const T a = 0.01) {
void activation(vector<float>& Output, vector<float>& Input, act_func mode = Linear, const float a = 0.01) {
cout << "===== activation ===== \n";
Output.resize(Input.size());

Expand All @@ -69,30 +62,30 @@ void activation(vector<T>& Output, vector<T>& Input, act_func mode = Linear, con
}


int activateion_test()
{
vector<float> input(10);
vector<float> output(10);

//initTensor(input, 1.f, 1.f);
initTensor(input, "random");

//activation(output, input, act_func::Sigmoid);
//activation(output, input, act_func::Tanh);
//activation(output, input, act_func::ReLU);
//activation(output, input, act_func::Swish);
//activation(output, input, act_func::LeakRelu);
activation(output, input, act_func::Linear);

valueCheck(output);

//tofile(output, "../Calc_Validation/output/C_Tensor_A_Sigmoid");
//tofile(output, "../Calc_Validation/output/C_Tensor_A_Tanh");
//tofile(output, "../Calc_Validation/output/C_Tensor_A_ReLU");
//tofile(output, "../Calc_Validation/output/C_Tensor_A_Swish");
//tofile(output, "../Calc_Validation/output/C_Tensor_A_LeakRelu");



return 0;
}
//int activateion_test()
//{
// vector<float> input(10);
// vector<float> output(10);
//
// //initTensor(input, 1.f, 1.f);
// initTensor(input, "random");
//
// //activation(output, input, act_func::Sigmoid);
// //activation(output, input, act_func::Tanh);
// //activation(output, input, act_func::ReLU);
// //activation(output, input, act_func::Swish);
// //activation(output, input, act_func::LeakRelu);
// activation(output, input, act_func::Linear);
//
// valueCheck(output);
//
// //tofile(output, "../Calc_Validation/output/C_Tensor_A_Sigmoid");
// //tofile(output, "../Calc_Validation/output/C_Tensor_A_Tanh");
// //tofile(output, "../Calc_Validation/output/C_Tensor_A_ReLU");
// //tofile(output, "../Calc_Validation/output/C_Tensor_A_Swish");
// //tofile(output, "../Calc_Validation/output/C_Tensor_A_LeakRelu");
//
//
//
// return 0;
//}
27 changes: 14 additions & 13 deletions DL_LAYER/Conv2d.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,28 +3,30 @@
/***************************************************************************
Conventional Convolution algorithm (without any option)
****************************************************************************/
#include "Utils.cpp"
#include "Utils.h"

// layer(functional) class 필요 ***
template <typename T>
Tensor<T> convolution(Tensor<T> &inTensor, int KH, int KW, int stride, int OC) {
Tensor convolution(Tensor &inTensor, int KH, int KW, int stride, int OC) {

// 1. weight 존재 유무 체크 없으면 -> 초기화 or 주입(전달)
Tensor<T> wTensor;
Tensor wTensor = Tensor(1,2,3,4);
// weight 초기화함수 필요 ***


float* weight = wTensor.getData();

// 2. input tensor 체크
vector<T> *input = inTensor.data.data();
int IN = inTensor.shape[0];
int IC = inTensor.shape[1];
int IH = inTensor.shape[2];
int IW = inTensor.shape[3];
float* input = inTensor.getData();
int IN = inTensor.getShape()[0];
int IC = inTensor.getShape()[1];
int IH = inTensor.getShape()[2];
int IW = inTensor.getShape()[3];

// 3. output tenosr 생성
int OH = ((IH - KH) / stride) + 1;
int OW = ((IW - KW) / stride) + 1;
Tensor<T> outTensor(IN, OC, OH, OW);
vector<T> *output = outTensor.data.data();
Tensor outTensor(IN, OC, OH, OW);
float* output = outTensor.getData();

// 4. 연산 수행
std::cout << "===== Convolution ===== \n" << std::endl;
Expand Down Expand Up @@ -65,8 +67,7 @@ Tensor<T> convolution(Tensor<T> &inTensor, int KH, int KW, int stride, int OC) {
return outTensor;
}

template <typename T>
void convolution(vector<float>& output, vector<T>& input, vector<float>& weight, int KH, int KW, int stride, int IN, int IC, int IH, int IW, int OC) {
void convolution(vector<float>& output, vector<float>& input, vector<float>& weight, int KH, int KW, int stride, int IN, int IC, int IH, int IW, int OC) {
int OH = ((IH - KH) / stride) + 1;
int OW = ((IW - KW) / stride) + 1;
std::cout << "===== Convolution ===== \n" << std::endl;
Expand Down
11 changes: 10 additions & 1 deletion DL_LAYER/DL_LAYER.vcxproj
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,13 @@
<PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
<MinimalRebuild>true</MinimalRebuild>
<AdditionalIncludeDirectories>$(OPENCV_HOME)/include</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>DebugFastLink</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(OPENCV_HOME)\x64\vc15\lib</AdditionalLibraryDirectories>
<AdditionalDependencies>opencv_world345d.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
Expand Down Expand Up @@ -143,25 +146,31 @@
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>$(OPENCV_HOME)\include</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>opencv_world345.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(OPENCV_HOME)\x64\vc15\lib</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="Activation.cpp" />
<ClCompile Include="Conv2d.cpp" />
<ClCompile Include="DataLoader.cpp" />
<ClCompile Include="Eltwise.cpp" />
<ClCompile Include="FullyConnected.cpp" />
<ClCompile Include="Pool2d.cpp" />
<ClCompile Include="SoftMax.cpp" />
<ClCompile Include="Utils.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="WeightInitializer.cpp" />
<ClCompile Include="WeightInitializer.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="ZeroPad.cpp" />
</ItemGroup>
<ItemGroup>
Expand Down
3 changes: 3 additions & 0 deletions DL_LAYER/DL_LAYER.vcxproj.filters
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,9 @@
<ClCompile Include="WeightInitializer.cpp">
<Filter>소스 파일</Filter>
</ClCompile>
<ClCompile Include="DataLoader.cpp">
<Filter>소스 파일</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Utils.h">
Expand Down
149 changes: 149 additions & 0 deletions DL_LAYER/DataLoader.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
#include <io.h>
#include <iostream>
#include <string>
#include <vector>
#include "Utils.h"
#include "opencv2/opencv.hpp"

using namespace cv;
using namespace std;

//파일 이름 가져오기(DFS) window용
// Collect the paths of every file inside `folder_path` (Windows-only; uses the
// MSVC _findfirst/_findnext API).  When `recursive` is true the search descends
// depth-first into each subdirectory.  Found paths are appended to `file_names`
// as "folder/file".  Returns 0 on success, -1 when the folder does not exist.
int SearchFile(const std::string& folder_path, std::vector<std::string> &file_names, bool recursive = false)
{
	_finddata_t file_info;
	string any_file_pattern = folder_path + "\\*";
	intptr_t handle = _findfirst(any_file_pattern.c_str(), &file_info);

	if (handle == -1)
	{
		cerr << "folder path not exist: " << folder_path << endl;
		return -1;
	}

	do
	{
		string file_name = file_info.name;
		if (file_info.attrib & _A_SUBDIR) // entry is a subdirectory, not a file
		{
			// "." and ".." would recurse forever; only real subfolders count.
			if (recursive && file_name != "." && file_name != "..")
			{
				string sub_folder_path = folder_path + "/" + file_name;
				// BUG FIX: propagate `recursive` so the DFS descends past the
				// first level (the original call fell back to the default
				// `false`, stopping one directory deep).
				SearchFile(sub_folder_path, file_names, recursive);
				cout << "a sub_folder path: " << sub_folder_path << endl;
			}
		}
		else // regular file: record its path
		{
			string file_path = folder_path + "/" + file_name;
			file_names.push_back(file_path);
		}
	} while (_findnext(handle, &file_info) == 0);
	_findclose(handle);
	return 0;
}

// linux용
//#include <dirent.h>
//static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
// DIR *p_dir = opendir(p_dir_name);
// if (p_dir == nullptr) {
// return -1;
// }
// struct dirent* p_file = nullptr;
// while ((p_file = readdir(p_dir)) != nullptr) {
// if (strcmp(p_file->d_name, ".") != 0 &&
// strcmp(p_file->d_name, "..") != 0) {
// std::string cur_file_name(p_file->d_name);
// file_names.push_back(cur_file_name);
// }
// }
// closedir(p_dir);
// return 0;
//}
//std::string img_dir;
//std::vector<std::string> file_names;
//read_files_in_dir(img_dir.c_str(), file_names);


// BGR -> RGB, NHWC->NCHW, Normalize
//void transformer(vector<uint8_t> input, string cvt, string transpose, string normalize)
//{
// if (cvt == "BGR2RGB" || cvt == "RGB2BGR") { // CH seq change
//
// }
// else if (cvt == "BGR2BGR" || cvt == "RGB2RGB") { // NO CHANAGE
//
// }
// else if (cvt == "BGR2GREY" || cvt == "RGB2GRAY") {// 3CH -> 1CH
// }
// else { // error
// }
//}


// Data-loader demo: read each image under ../data, resize it to 640x640, then
// convert BGR / NHWC / uint8 (0..255) pixels into RGB / NCHW / float values
// normalized to (-1, 1) inside a Tensor.
int main()
{
	// 0. collect image paths
	std::string img_dir = "../data";
	std::vector<std::string> file_names;
	if (SearchFile(img_dir.c_str(), file_names) < 0) {
		std::cerr << "data search error" << std::endl;
	}

	// 1. load image data (uint8, BGR, NHWC)
	int batch_size = 1;
	int input_width = 640;
	int input_height = 640;
	Mat img(input_height, input_width, CV_8UC3);
	vector<uint8_t> input(batch_size * input_height * input_width * 3);
	for (size_t idx = 0; idx < file_names.size(); idx++) {
		Mat ori_img = imread(file_names[idx]);
		resize(ori_img, img, img.size());
		// NOTE(review): each iteration overwrites the same single-image slot,
		// so with batch_size == 1 only the last file survives.  TODO: offset
		// the destination by idx * H * W * 3 once batch_size > 1 is supported.
		memcpy(input.data(), img.data, batch_size * input_height * input_width * 3);
	}

	// 2. hand the pixels to a Tensor (uint8 -> float); still BGR, NHWC, 0..255
	Tensor input_t(batch_size, input_height, input_width, 3);
	for (int idx = 0; idx < input_t.getTotSize(); idx++) {
		input_t.getData()[idx] = static_cast<float>(input[idx]);
	}

	// 3. BGR -> RGB, NHWC -> NCHW, normalize to (-1, 1)
	vector<float> output(input_t.getTotSize());
	int IN = input_t.getShape()[0];
	int IH = input_t.getShape()[1];
	int IW = input_t.getShape()[2];
	int IC = input_t.getShape()[3];
	int N_offset = IH * IW * IC; // elements per batch item
	for (int n_idx = 0; n_idx < IN; n_idx++) {
		int batch_base = n_idx * N_offset;
		for (int h_idx = 0; h_idx < IH; h_idx++) {
			int row_base = batch_base + h_idx * IW * IC;
			for (int w_idx = 0; w_idx < IW; w_idx++) {
				int px_base = row_base + w_idx * IC;
				for (int c_idx = 0; c_idx < IC; c_idx++) {
					// read channels in reverse order: BGR -> RGB
					int g_in = px_base + 2 - c_idx;
					// write NCHW: n*C*H*W + c*H*W + h*W + w
					int g_out = batch_base + c_idx * IH * IW + h_idx * IW + w_idx;
					output[g_out] = input_t.getData()[g_in] / 127.5f - 1.f;
				}
			}
		}
	}
	// BUG FIX: memcpy size is in BYTES -- the original omitted sizeof(float)
	// and therefore copied only a quarter of the transformed tensor back.
	memcpy(input_t.getData(), output.data(),
	       sizeof(float) * batch_size * input_height * input_width * 3);

	return 0;
}
7 changes: 3 additions & 4 deletions DL_LAYER/Eltwise.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,7 @@

#include "Utils.h"

template <typename T>
void eltwiseSum(vector<T>& output, vector<T>& input1, vector<T>& input2)
void eltwiseSum(vector<float>& output, vector<float>& input1, vector<float>& input2)
{
std::cout << "===== eltwiseSum func =====" << std::endl;
assert(input1.size() == input2.size());
Expand All @@ -17,8 +16,8 @@ void eltwiseSum(vector<T>& output, vector<T>& input1, vector<T>& input2)
output[i] = input1[i] + input2[i];
}
}
template <typename T>
void eltwiseProd(vector<T>& output, vector<T>& input1, vector<T>& input2)

void eltwiseProd(vector<float>& output, vector<float>& input1, vector<float>& input2)
{
std::cout << "===== eltwiseProd func =====" << std::endl;
assert(input1.size() == input2.size());
Expand Down
Loading

0 comments on commit e0ae71b

Please sign in to comment.