From 70c07970fbbc10eb73f680bc8b18f80e54950334 Mon Sep 17 00:00:00 2001
From: yeahoool
Date: Fri, 16 Aug 2024 01:25:53 +0800
Subject: [PATCH] Neural networks with multiple hidden layers (up to 3 layers)

How to use it: set the neuron keyword directly in the nep.in file, for
example "neuron 25 25" gives two hidden layers with 25 neurons each.
---
 src/main_nep/fitness.cu         |   8 ++-
 src/main_nep/nep3.cu            | 109 ++++++++++++++++++++------------
 src/main_nep/nep3.cuh           |  19 +++---
 src/main_nep/parameters.cu      |  58 +++++++++++++++--
 src/main_nep/parameters.cuh     |   5 +-
 src/main_nep/snes.cu            |  26 ++++++--
 src/utilities/nep_utilities.cuh |  72 +++++++++++++++++++++
 7 files changed, 235 insertions(+), 62 deletions(-)

diff --git a/src/main_nep/fitness.cu b/src/main_nep/fitness.cu
index 539ba4407..02fa78616 100644
--- a/src/main_nep/fitness.cu
+++ b/src/main_nep/fitness.cu
@@ -333,7 +333,13 @@ void Fitness::write_nep_txt(FILE* fid_nep, Parameters& para, float* elite)
   fprintf(fid_nep, "basis_size %d %d\n", para.basis_size_radial, para.basis_size_angular);
   fprintf(fid_nep, "l_max %d %d %d\n", para.L_max, para.L_max_4body, para.L_max_5body);
-  fprintf(fid_nep, "ANN %d %d\n", para.num_neurons1, 0);
+  if (para.num_neurons3 != 0) {
+    fprintf(fid_nep, "ANN %d %d %d %d\n", para.num_neurons1, para.num_neurons2, para.num_neurons3, 0);
+  } else if (para.num_neurons2 != 0) {
+    fprintf(fid_nep, "ANN %d %d %d\n", para.num_neurons1, para.num_neurons2, 0);
+  } else {
+    fprintf(fid_nep, "ANN %d %d\n", para.num_neurons1, 0);
+  }
   for (int m = 0; m < para.number_of_variables; ++m) {
     fprintf(fid_nep, "%15.7e\n", elite[m]);
   }
diff --git a/src/main_nep/nep3.cu b/src/main_nep/nep3.cu
index e3e71a3e0..60a86926a 100644
--- a/src/main_nep/nep3.cu
+++ b/src/main_nep/nep3.cu
@@ -290,8 +290,11 @@ NEP3::NEP3(
   for (int device_id = 0; device_id < deviceCount; device_id++) {
     cudaSetDevice(device_id);
     annmb[device_id].dim = para.dim;
-    annmb[device_id].num_neurons1 = para.num_neurons1;
+    annmb[device_id].num_neurons[0] = para.num_neurons1;
+    annmb[device_id].num_neurons[1] = para.num_neurons2;
+    annmb[device_id].num_neurons[2] = para.num_neurons3;
     annmb[device_id].num_para = para.number_of_variables;
+    annmb[device_id].num_hidden_layers = para.num_hidden_layers;

     nep_data[device_id].NN_radial.resize(N);
     nep_data[device_id].NN_angular.resize(N);
@@ -315,31 +318,35 @@ void NEP3::update_potential(Parameters& para, float* parameters, ANN& ann)
   float* pointer = parameters;
   for (int t = 0; t < paramb.num_types; ++t) {
     if (t > 0 && paramb.version != 4) { // Use the same set of NN parameters for NEP3
-      pointer -= (ann.dim + 2) * ann.num_neurons1;
+      pointer -= (ann.dim + 2) * ann.num_neurons[0];
     }
-    ann.w0[t] = pointer;
-    pointer += ann.num_neurons1 * ann.dim;
-    ann.b0[t] = pointer;
-    pointer += ann.num_neurons1;
-    ann.w1[t] = pointer;
-    pointer += ann.num_neurons1;
+    for (int i = 0; i < ann.num_hidden_layers; ++i) {
+      ann.w[t][i] = pointer;
+      pointer += (i == 0 ? ann.dim : ann.num_neurons[i-1]) * ann.num_neurons[i];
+      ann.b[t][i] = pointer;
+      pointer += ann.num_neurons[i];
+    }
+    ann.w_out[t] = pointer;
+    pointer += ann.num_neurons[ann.num_hidden_layers - 1];
   }
-  ann.b1 = pointer;
+  ann.b_out = pointer;
   pointer += 1;

   if (para.train_mode == 2) {
     for (int t = 0; t < paramb.num_types; ++t) {
       if (t > 0 && paramb.version != 4) { // Use the same set of NN parameters for NEP3
-        pointer -= (ann.dim + 2) * ann.num_neurons1;
+        pointer -= (ann.dim + 2) * ann.num_neurons[0];
+      }
+      for (int i = 0; i < ann.num_hidden_layers; ++i) {
+        ann.w_pol[t][i] = pointer;
+        pointer += (i == 0 ? ann.dim : ann.num_neurons[i-1]) * ann.num_neurons[i];
+        ann.b_pol[t][i] = pointer;
+        pointer += ann.num_neurons[i];
       }
-      ann.w0_pol[t] = pointer;
-      pointer += ann.num_neurons1 * ann.dim;
-      ann.b0_pol[t] = pointer;
-      pointer += ann.num_neurons1;
-      ann.w1_pol[t] = pointer;
-      pointer += ann.num_neurons1;
+      ann.w_out_pol[t] = pointer;
+      pointer += ann.num_neurons[ann.num_hidden_layers - 1];
     }
-    ann.b1_pol = pointer;
+    ann.b_out_pol = pointer;
     pointer += 1;
   }

@@ -406,13 +413,18 @@ static __global__ void apply_ann(
     }
     // get energy and energy gradient
     float F = 0.0f, Fp[MAX_DIM] = {0.0f};
-    apply_ann_one_layer(
+    apply_ann_multi_layers(
       annmb.dim,
-      annmb.num_neurons1,
-      annmb.w0[type],
-      annmb.b0[type],
-      annmb.w1[type],
-      annmb.b1,
+      annmb.num_hidden_layers,
+      annmb.num_neurons,
+      annmb.w[type][0],
+      annmb.w[type][1],
+      annmb.w[type][2],
+      annmb.b[type][0],
+      annmb.b[type][1],
+      annmb.b[type][2],
+      annmb.w_out[type],
+      annmb.b_out,
       q,
       F,
       Fp);
@@ -446,13 +458,18 @@ static __global__ void apply_ann_pol(
     float F = 0.0f, Fp[MAX_DIM] = {0.0f};

     // scalar part
-    apply_ann_one_layer(
+    apply_ann_multi_layers(
       annmb.dim,
-      annmb.num_neurons1,
-      annmb.w0_pol[type],
-      annmb.b0_pol[type],
-      annmb.w1_pol[type],
-      annmb.b1_pol,
+      annmb.num_hidden_layers,
+      annmb.num_neurons,
+      annmb.w_pol[type][0],
+      annmb.w_pol[type][1],
+      annmb.w_pol[type][2],
+      annmb.b_pol[type][0],
+      annmb.b_pol[type][1],
+      annmb.b_pol[type][2],
+      annmb.w_out_pol[type],
+      annmb.b_out_pol,
       q,
       F,
       Fp);
@@ -464,13 +481,18 @@ static __global__ void apply_ann_pol(
     for (int d = 0; d < annmb.dim; ++d) {
       Fp[d] = 0.0f;
     }
-    apply_ann_one_layer(
+    apply_ann_multi_layers(
       annmb.dim,
-      annmb.num_neurons1,
-      annmb.w0[type],
-      annmb.b0[type],
-      annmb.w1[type],
-      annmb.b1,
+      annmb.num_hidden_layers,
+      annmb.num_neurons,
+      annmb.w[type][0],
+      annmb.w[type][1],
+      annmb.w[type][2],
+      annmb.b[type][0],
+      annmb.b[type][1],
+      annmb.b[type][2],
+      annmb.w_out[type],
+      annmb.b_out,
       q,
       F,
       Fp);
@@ -505,13 +527,18 @@ static __global__ void apply_ann_temperature(
     q[annmb.dim - 1] = temperature * g_q_scaler[annmb.dim - 1];
     // get energy and energy gradient
     float F = 0.0f, Fp[MAX_DIM] = {0.0f};
-    apply_ann_one_layer(
+    apply_ann_multi_layers(
       annmb.dim,
-      annmb.num_neurons1,
-      annmb.w0[type],
-      annmb.b0[type],
-      annmb.w1[type],
-      annmb.b1,
+      annmb.num_hidden_layers,
+      annmb.num_neurons,
+      annmb.w[type][0],
+      annmb.w[type][1],
+      annmb.w[type][2],
+      annmb.b[type][0],
+      annmb.b[type][1],
+      annmb.b[type][2],
+      annmb.w_out[type],
+      annmb.b_out,
       q,
       F,
       Fp);
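Note (editorial, not part of the diff): in update_potential() above the flat parameter vector is walked per atom type in the order weights-then-biases for each hidden layer, followed by the weights to the single output neuron; the one output bias comes last and is shared by all types. A minimal host-side sketch of the same bookkeeping, with a made-up helper name, purely for illustration:

  #include <cstddef>

  // Floats occupied by the ANN of one atom type, mirroring the pointer
  // arithmetic in NEP3::update_potential. The shared output bias b_out is
  // not counted here; it contributes the trailing "+ 1" elsewhere.
  inline size_t ann_floats_per_type(int dim, int num_hidden_layers, const int num_neurons[3])
  {
    size_t n = 0;
    for (int i = 0; i < num_hidden_layers; ++i) {
      const int fan_in = (i == 0) ? dim : num_neurons[i - 1];
      n += static_cast<size_t>(fan_in) * num_neurons[i]; // w[t][i]
      n += num_neurons[i];                               // b[t][i]
    }
    n += num_neurons[num_hidden_layers - 1]; // w_out[t]
    return n;
  }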
diff --git a/src/main_nep/nep3.cuh b/src/main_nep/nep3.cuh
index b06daeeaf..7188a6538 100644
--- a/src/main_nep/nep3.cuh
+++ b/src/main_nep/nep3.cuh
@@ -66,17 +66,18 @@ public:
   struct ANN {
     int dim = 0;                    // dimension of the descriptor
-    int num_neurons1 = 0;           // number of neurons in the hidden layer
+    int num_hidden_layers = 1;      // number of hidden layers (1 to 3)
+    int num_neurons[3] = {0, 0, 0}; // number of neurons in each hidden layer
     int num_para = 0;               // number of parameters
-    const float* w0[NUM_ELEMENTS]; // weight from the input layer to the hidden layer
-    const float* b0[NUM_ELEMENTS]; // bias for the hidden layer
-    const float* w1[NUM_ELEMENTS]; // weight from the hidden layer to the output layer
-    const float* b1;               // bias for the output layer
+    const float* w[50][3];  // weights for each hidden layer
+    const float* b[50][3];  // biases for each hidden layer
+    const float* w_out[50]; // weights from the last hidden layer to the output layer
+    const float* b_out;     // bias for the output layer
     // for the scalar part of polarizability
-    const float* w0_pol[10]; // weight from the input layer to the hidden layer
-    const float* b0_pol[10]; // bias for the hidden layer
-    const float* w1_pol[10]; // weight from the hidden layer to the output layer
-    const float* b1_pol;     // bias for the output layer
+    const float* w_pol[10][3];  // weights for each hidden layer
+    const float* b_pol[10][3];  // biases for each hidden layer
+    const float* w_out_pol[10]; // weights from the last hidden layer to the output layer
+    const float* b_out_pol;     // bias for the output layer
     // for elements in descriptor
     const float* c;
   };
diff --git a/src/main_nep/parameters.cu b/src/main_nep/parameters.cu
index 47e7e1f43..5dd84699e 100644
--- a/src/main_nep/parameters.cu
+++ b/src/main_nep/parameters.cu
@@ -87,7 +87,10 @@ void Parameters::set_default_parameters()
   L_max = 4;       // the only supported value
   L_max_4body = 2; // default is to include 4body
   L_max_5body = 0; // default is not to include 5body
+  num_hidden_layers = 1; // default is to have one hidden layer
   num_neurons1 = 30; // a relatively small value to achieve high speed
+  num_neurons2 = 0;  // default is not to have the 2nd hidden layer
+  num_neurons3 = 0;  // default is not to have the 3rd hidden layer
   lambda_1 = lambda_2 = -1.0f; // automatic regularization
   lambda_e = lambda_f = 1.0f;  // energy and force are more important
   lambda_v = 0.1f;             // virial is less important
@@ -185,7 +188,13 @@ void Parameters::calculate_parameters()
   }
 #endif

-  number_of_variables_ann = (dim + 2) * num_neurons1 * (version == 4 ? num_types : 1) + 1;
+  if (num_hidden_layers == 1) {
+    number_of_variables_ann = (dim + 2) * num_neurons1 * (version == 4 ? num_types : 1) + 1;
+  } else if (num_hidden_layers == 2) {
+    number_of_variables_ann = ((dim + 1) * num_neurons1 + (num_neurons1 + 2) * num_neurons2) * (version == 4 ? num_types : 1) + 1;
+  } else {
+    number_of_variables_ann = ((dim + 1) * num_neurons1 + (num_neurons1 + 1) * num_neurons2 + (num_neurons2 + 2) * num_neurons3) * (version == 4 ? num_types : 1) + 1;
+  }

   number_of_variables_descriptor =
     num_types * num_types *
@@ -343,9 +352,15 @@ void Parameters::report_inputs()
   }

   if (is_neuron_set) {
-    printf(" (input) number of neurons = %d.\n", num_neurons1);
+    printf(" (input) number of hidden layers = %d.\n", num_hidden_layers);
+    printf(" (input-1st) number of neurons = %d.\n", num_neurons1);
+    printf(" (input-2nd) number of neurons = %d.\n", num_neurons2);
+    printf(" (input-3rd) number of neurons = %d.\n", num_neurons3);
   } else {
-    printf(" (default) number of neurons = %d.\n", num_neurons1);
+    printf(" (default) number of hidden layers = %d.\n", num_hidden_layers);
+    printf(" (default-1st) number of neurons = %d.\n", num_neurons1);
+    printf(" (default-2nd) number of neurons = %d.\n", num_neurons2);
+    printf(" (default-3rd) number of neurons = %d.\n", num_neurons3);
   }

   if (is_lambda_1_set) {
@@ -416,7 +431,13 @@ void Parameters::report_inputs()
   printf(" number of radial descriptor components = %d.\n", dim_radial);
   printf(" number of angular descriptor components = %d.\n", dim_angular);
   printf(" total number of descriptor components = %d.\n", dim);
-  printf(" NN architecture = %d-%d-1.\n", dim, num_neurons1);
+  if (num_neurons3 != 0) {
+    printf(" NN architecture = %d-%d-%d-%d-1.\n", dim, num_neurons1, num_neurons2, num_neurons3);
+  } else if (num_neurons2 != 0) {
+    printf(" NN architecture = %d-%d-%d-1.\n", dim, num_neurons1, num_neurons2);
+  } else {
+    printf(" NN architecture = %d-%d-1.\n", dim, num_neurons1);
+  }
   printf(
     " number of NN parameters to be optimized = %d.\n",
     number_of_variables_ann * (train_mode == 2 ? 2 : 1));
@@ -747,9 +768,12 @@ void Parameters::parse_neuron(const char** param, int num_param)
 {
   is_neuron_set = true;

-  if (num_param != 2) {
-    PRINT_INPUT_ERROR("neuron should have 1 parameter.\n");
+  if (num_param < 2 || num_param > 4) {
+    PRINT_INPUT_ERROR("neuron should have 1 to 3 parameters.\n");
   }
+
+  num_hidden_layers = num_param - 1;
+
   if (!is_valid_int(param[1], &num_neurons1)) {
     PRINT_INPUT_ERROR("number of neurons should be an integer.\n");
   }
   if (num_neurons1 < 1) {
     PRINT_INPUT_ERROR("number of neurons should >= 1.");
   } else if (num_neurons1 > 200) {
     PRINT_INPUT_ERROR("number of neurons should <= 200.");
   }
+
+  if (num_param > 2) {
+    if (!is_valid_int(param[2], &num_neurons2)) {
+      PRINT_INPUT_ERROR("number of neurons should be an integer.\n");
+    }
+    if (num_neurons2 < 1) {
+      PRINT_INPUT_ERROR("number of neurons should >= 1.");
+    } else if (num_neurons2 > 200) {
+      PRINT_INPUT_ERROR("number of neurons should <= 200.");
+    }
+  }
+
+  if (num_param > 3) {
+    if (!is_valid_int(param[3], &num_neurons3)) {
+      PRINT_INPUT_ERROR("number of neurons should be an integer.\n");
+    }
+    if (num_neurons3 < 1) {
+      PRINT_INPUT_ERROR("number of neurons should >= 1.");
+    } else if (num_neurons3 > 200) {
+      PRINT_INPUT_ERROR("number of neurons should <= 200.");
+    }
+  }
 }

 void Parameters::parse_lambda_1(const char** param, int num_param)
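Note (editorial, not part of the diff): the branches of number_of_variables_ann above simply count weights and biases layer by layer per atom type, plus one shared output bias. A worked example with hypothetical sizes, dim = 30 and "neuron 25 25":

  // Per-type ANN parameter count for two hidden layers (hypothetical dim = 30, neurons 25 and 25).
  constexpr int dim = 30, n1 = 25, n2 = 25;
  constexpr int layer1 = dim * n1 + n1; // weights + biases of the 1st hidden layer = 775
  constexpr int layer2 = n1 * n2 + n2;  // weights + biases of the 2nd hidden layer = 650
  constexpr int output = n2;            // weights from the 2nd hidden layer to the output neuron = 25
  static_assert(layer1 + layer2 + output == (dim + 1) * n1 + (n1 + 2) * n2, "matches the 2-layer formula");
  // With version == 4 (NEP4) and 2 atom types: 2 * 1450 + 1 = 2901 ANN parameters,
  // the "+ 1" being the single output bias shared by all types.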
diff --git a/src/main_nep/parameters.cuh b/src/main_nep/parameters.cuh
index 97d2ca0ba..af8d329b7 100644
--- a/src/main_nep/parameters.cuh
+++ b/src/main_nep/parameters.cuh
@@ -30,7 +30,10 @@ public:
   int num_types;          // number of atom types
   int population_size;    // population size for SNES
   int maximum_generation; // maximum number of generations for SNES;
-  int num_neurons1;       // number of nuerons in the 1st hidden layer (only one hidden layer)
+  int num_hidden_layers;  // number of hidden layers
+  int num_neurons1;       // number of neurons in the 1st hidden layer
+  int num_neurons2;       // number of neurons in the 2nd hidden layer
+  int num_neurons3;       // number of neurons in the 3rd hidden layer
   int basis_size_radial;  // for nep3
   int basis_size_angular; // for nep3
   int n_max_radial;       // maximum order of the radial Chebyshev polynomials
diff --git a/src/main_nep/snes.cu b/src/main_nep/snes.cu
index 031ca29c8..cc970b402 100644
--- a/src/main_nep/snes.cu
+++ b/src/main_nep/snes.cu
@@ -136,15 +136,33 @@ void SNES::find_type_of_variable(Parameters& para)
     int num_ann = (para.train_mode == 2) ? 2 : 1;
     for (int ann = 0; ann < num_ann; ++ann) {
       for (int t = 0; t < para.num_types; ++t) {
-        for (int n = 0; n < (para.dim + 2) * para.num_neurons1; ++n) {
-          type_of_variable[n + offset] = t;
+        if (para.num_hidden_layers == 1) {
+          for (int n = 0; n < (para.dim + 2) * para.num_neurons1; ++n) {
+            type_of_variable[n + offset] = t;
+          }
+          offset += (para.dim + 2) * para.num_neurons1;
+        } else if (para.num_hidden_layers == 2) {
+          for (int n = 0; n < ((para.dim + 1) * para.num_neurons1 + (para.num_neurons1 + 2) * para.num_neurons2); ++n) {
+            type_of_variable[n + offset] = t;
+          }
+          offset += (para.dim + 1) * para.num_neurons1 + (para.num_neurons1 + 2) * para.num_neurons2;
+        } else {
+          for (int n = 0; n < ((para.dim + 1) * para.num_neurons1 + (para.num_neurons1 + 1) * para.num_neurons2 + (para.num_neurons2 + 2) * para.num_neurons3); ++n) {
+            type_of_variable[n + offset] = t;
+          }
+          offset += (para.dim + 1) * para.num_neurons1 + (para.num_neurons1 + 1) * para.num_neurons2 + (para.num_neurons2 + 2) * para.num_neurons3;
         }
-        offset += (para.dim + 2) * para.num_neurons1;
       }
       ++offset; // the bias
     }
   } else {
-    offset += (para.dim + 2) * para.num_neurons1 + 1;
+    if (para.num_hidden_layers == 1) {
+      offset += (para.dim + 2) * para.num_neurons1 + 1;
+    } else if (para.num_hidden_layers == 2) {
+      offset += (para.dim + 1) * para.num_neurons1 + (para.num_neurons1 + 2) * para.num_neurons2 + 1;
+    } else {
+      offset += (para.dim + 1) * para.num_neurons1 + (para.num_neurons1 + 1) * para.num_neurons2 + (para.num_neurons2 + 2) * para.num_neurons3 + 1;
+    }
   }

   // descriptor part
diff --git a/src/utilities/nep_utilities.cuh b/src/utilities/nep_utilities.cuh
index 4b5d764fb..85825c43b 100644
--- a/src/utilities/nep_utilities.cuh
+++ b/src/utilities/nep_utilities.cuh
@@ -75,6 +75,78 @@ static __device__ void apply_ann_one_layer(
   energy -= b1[0];
 }

+static __device__ void apply_ann_multi_layers(
+  const int N_des,
+  const int layers,
+  const int* N_neu,
+  const float* w0,
+  const float* w1,
+  const float* w2,
+  const float* b0,
+  const float* b1,
+  const float* b2,
+  const float* w_out,
+  const float* b_out,
+  float* q,
+  float& energy,
+  float* energy_derivative)
+{
+  constexpr int MAX_NEURONS_PER_LAYER = 200;
+  float x[3 * MAX_NEURONS_PER_LAYER];     // outputs of the neurons in each hidden layer
+  float delta[3 * MAX_NEURONS_PER_LAYER]; // error (delta) of each neuron
+
+  // first hidden layer (takes the descriptor as input)
+  for (int n = 0; n < N_neu[0]; ++n) {
+    float sum = 0.0f;
+    for (int d = 0; d < N_des; ++d) {
+      sum += w0[n * N_des + d] * q[d];
+    }
+    x[n] = tanh(sum - b0[n]);
+  }
+  // remaining hidden layers
+  for (int l = 1; l < layers; ++l) {
+    const float* w = (l == 1) ? w1 : w2;
+    const float* b = (l == 1) ? b1 : b2;
+    for (int n = 0; n < N_neu[l]; ++n) {
+      float sum = 0.0f;
+      for (int m = 0; m < N_neu[l - 1]; ++m) {
+        sum += w[n * N_neu[l - 1] + m] * x[(l - 1) * MAX_NEURONS_PER_LAYER + m];
+      }
+      x[l * MAX_NEURONS_PER_LAYER + n] = tanh(sum - b[n]);
+    }
+  }
+  // output layer
+  energy = 0.0f;
+  for (int n = 0; n < N_neu[layers - 1]; ++n) {
+    float out = x[(layers - 1) * MAX_NEURONS_PER_LAYER + n];
+    energy += w_out[n] * out;                                                        // w_out_j * x_j of the last hidden layer
+    delta[(layers - 1) * MAX_NEURONS_PER_LAYER + n] = w_out[n] * (1.0f - out * out); // delta_j of the last hidden layer
+  }
+  energy -= b_out[0];
+
+  // Backpropagation of the error
+  for (int l = layers - 1; l >= 1; --l) {          // l = 2, 1
+    const float* w_next = (l == 1) ? w1 : w2;      // w2, w1
+    for (int m = 0; m < N_neu[l - 1]; ++m) {
+      float sum = 0.0f;
+      for (int n = 0; n < N_neu[l]; ++n) {
+        sum += w_next[n * N_neu[l - 1] + m] * delta[l * MAX_NEURONS_PER_LAYER + n];
+      }
+      float out = x[(l - 1) * MAX_NEURONS_PER_LAYER + m];
+      delta[(l - 1) * MAX_NEURONS_PER_LAYER + m] = sum * (1.0f - out * out);
+    }
+  }
+
+  // Derivative of the energy with respect to the descriptor
+  for (int d = 0; d < N_des; ++d) {
+    energy_derivative[d] = 0.0f;
+    for (int n = 0; n < N_neu[0]; ++n) {
+      energy_derivative[d] += w0[n * N_des + d] * delta[n];
+    }
+  }
+
+}
+
 static __device__ __forceinline__ void find_fc(float rc, float rcinv, float d12, float& fc)
 {
   if (d12 < rc) {
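Note (editorial, not part of the diff): apply_ann_multi_layers() above evaluates a tanh network and backpropagates delta = dE/d(pre-activation) through the hidden layers to obtain dE/dq. The following plain CPU reference of the same arithmetic is a hypothetical helper, handy for unit-testing the device function against finite differences; the parameter layout per layer matches the patch (row n of layer l holds the fan-in weights of neuron n, and biases enter with a minus sign):

  #include <cmath>
  #include <vector>

  // CPU reference in double precision (illustrative only).
  double reference_energy(
    const std::vector<std::vector<double>>& w,  // hidden-layer weights, w[l][n * fan_in + m]
    const std::vector<std::vector<double>>& b,  // hidden-layer biases
    const std::vector<double>& w_out,           // weights to the single output neuron
    double b_out,                               // output bias
    const std::vector<double>& q,               // descriptor
    std::vector<double>* dE_dq)                 // optional gradient output
  {
    std::vector<double> x(q);                   // activations of the previous layer
    std::vector<std::vector<double>> xs;        // keep all activations for backprop
    for (size_t l = 0; l < w.size(); ++l) {
      const size_t fan_in = x.size();
      std::vector<double> y(b[l].size());
      for (size_t n = 0; n < y.size(); ++n) {
        double sum = 0.0;
        for (size_t m = 0; m < fan_in; ++m) {
          sum += w[l][n * fan_in + m] * x[m];
        }
        y[n] = std::tanh(sum - b[l][n]);        // minus sign on the bias, as in the patch
      }
      xs.push_back(y);
      x = y;
    }
    double energy = -b_out;
    for (size_t n = 0; n < x.size(); ++n) {
      energy += w_out[n] * x[n];
    }
    if (dE_dq) {
      // delta of the last hidden layer
      std::vector<double> delta(x.size());
      for (size_t n = 0; n < x.size(); ++n) {
        delta[n] = w_out[n] * (1.0 - x[n] * x[n]);
      }
      // backpropagate delta down to the first hidden layer
      for (size_t l = w.size(); l-- > 1;) {
        const size_t fan_in = xs[l - 1].size();
        std::vector<double> prev(fan_in);
        for (size_t m = 0; m < fan_in; ++m) {
          double sum = 0.0;
          for (size_t n = 0; n < delta.size(); ++n) {
            sum += w[l][n * fan_in + m] * delta[n];
          }
          prev[m] = sum * (1.0 - xs[l - 1][m] * xs[l - 1][m]);
        }
        delta = prev;
      }
      // dE/dq[d] = sum_n w0[n, d] * delta_first_layer[n]
      dE_dq->assign(q.size(), 0.0);
      for (size_t d = 0; d < q.size(); ++d) {
        for (size_t n = 0; n < delta.size(); ++n) {
          (*dE_dq)[d] += w[0][n * q.size() + d] * delta[n];
        }
      }
    }
    return energy;
  }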