From 68e9abae8ca8c8586af91e842961eddaea1d1657 Mon Sep 17 00:00:00 2001 From: mhucka Date: Thu, 12 Dec 2024 04:37:02 +0000 Subject: [PATCH 1/5] Fix type mismatch warnings Many files produced many type mismatch warnings with both clang 17 and gcc 14 on a Debian 6.10.11 x86_64 system. This commit fixes almost all the ones I'm seeing in the TFQ code base. Some other warnings still arise in other code and modules. Those warnings don't appear to be things we can easily work around short of creating a lot of patch files (at significant cost in time and testing). For those, I'm currently relying on setting compiler options (e.g., `-Wno-unused-function` for warnings about unused functions) narrowed in scope to the problematic package using Bazel's `--per_file_copt` and `--host_per_file_copt` flags. Those flags are not in this commit; they need to be made part of the TFQ `configure.sh` script. --- .../core/ops/math_ops/tfq_inner_product.cc | 12 ++++---- .../ops/math_ops/tfq_inner_product_grad.cc | 8 +++--- .../core/ops/noise/tfq_noisy_expectation.cc | 28 +++++++++---------- .../noise/tfq_noisy_sampled_expectation.cc | 28 +++++++++---------- .../core/ops/noise/tfq_noisy_samples.cc | 8 +++--- .../core/ops/tfq_adj_grad_op.cc | 14 +++++----- .../core/ops/tfq_calculate_unitary_op.cc | 4 +-- .../core/ops/tfq_ps_symbol_replace_op.cc | 7 +++-- .../ops/tfq_ps_weights_from_symbols_op.cc | 2 +- .../core/ops/tfq_simulate_expectation_op.cc | 8 +++--- .../tfq_simulate_sampled_expectation_op.cc | 8 +++--- .../core/ops/tfq_simulate_samples_op.cc | 10 +++---- .../core/ops/tfq_simulate_state_op.cc | 6 ++-- tensorflow_quantum/core/src/adj_util.cc | 10 +++---- .../core/src/util_balance_trajectory.cc | 8 +++--- .../core/src/util_balance_trajectory_test.cc | 6 ++-- tensorflow_quantum/core/src/util_qsim.h | 6 ++-- 17 files changed, 86 insertions(+), 87 deletions(-) diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc index 6a5c9db49..df83b6222 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc @@ -179,7 +179,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (size_t i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { // need to switch to larger statespace. @@ -191,10 +191,10 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. 
ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (int j = 0; j < other_fused_circuits[i].size(); j++) { + for (size_t j = 0; j < other_fused_circuits[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = std::complex(1, 0); @@ -202,7 +202,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel { } ss.SetStateZero(scratch); - for (int k = 0; k < other_fused_circuits[i][j].size(); k++) { + for (size_t k = 0; k < other_fused_circuits[i][j].size(); k++) { qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch); } @@ -260,13 +260,13 @@ class TfqInnerProductOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } ss.SetStateZero(scratch); - for (int k = 0; + for (size_t k = 0; k < other_fused_circuits[cur_batch_index][cur_internal_index].size(); k++) { diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc index 198f92c63..b7b5a34aa 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc @@ -62,8 +62,8 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel { // Create the output Tensor. const int output_dim_batch_size = context->input(0).dim_size(0); - const int output_dim_internal_size = context->input(3).dim_size(1); - const int output_dim_symbol_size = context->input(1).dim_size(0); + size_t output_dim_internal_size = context->input(3).dim_size(1); + size_t output_dim_symbol_size = context->input(1).dim_size(0); OP_REQUIRES(context, output_dim_symbol_size > 0, tensorflow::errors::InvalidArgument(absl::StrCat( "The number of symbols must be a positive integer, got ", @@ -403,13 +403,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. 
uint64_t mask = 0; uint64_t cbits = 0; - for (int k = 0; k < cur_gate.controlled_by.size(); k++) { + for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (int k = 0; + for (size_t k = 0; k < gradient_gates[cur_batch_index][l - 1].grad_gates.size(); k++) { // Copy sv_adj onto scratch2 in anticipation of non-unitary diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc index dabe6ceac..5a13fa1af 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc @@ -181,8 +181,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_n_shots = 1; - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t i = 0; i < num_samples.size(); i++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { max_n_shots = std::max(max_n_shots, num_samples[i][j]); } } @@ -194,12 +194,12 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < ncircuits.size(); i++) { + for (size_t i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; // (#679) Just ignore empty program if (ncircuits[i].channels.size() == 0) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -226,7 +226,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { sv, unused_stats); // Use this trajectory as a source for all expectation calculations. 
- for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { if (run_samples[j] >= num_samples[i][j]) { continue; } @@ -238,14 +238,14 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { run_samples[j]++; } bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { if (run_samples[j] < num_samples[i][j]) { break_loop = false; break; } } if (break_loop) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) = static_cast(rolling_sums[j]); } @@ -286,8 +286,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_n_shots = 1; - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t i = 0; i < num_samples.size(); i++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { max_n_shots = std::max(max_n_shots, num_samples[i][j]); } } @@ -310,13 +310,13 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1); tensorflow::random::SimplePhilox rand_source(&local_gen); - for (int i = 0; i < ncircuits.size(); i++) { + for (size_t i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; int rep_offset = rep_offsets[start][i]; // (#679) Just ignore empty program if (ncircuits[i].channels.size() == 0) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -343,7 +343,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { sim, sv, unused_stats); // Compute expectations across all ops using this trajectory. - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] >= p_reps + rep_offset) { continue; @@ -360,7 +360,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { // Check if we have run enough trajectories for all ops. bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] < p_reps + rep_offset) { break_loop = false; @@ -370,7 +370,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel { if (break_loop) { // Lock writing to this batch index in output_tensor. 
batch_locks[i].lock(); - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) += static_cast(rolling_sums[j]); } diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc index 66a0e168c..ed59331b1 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc @@ -183,8 +183,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_psum_length = 1; int max_n_shots = 1; - for (int i = 0; i < pauli_sums.size(); i++) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t i = 0; i < pauli_sums.size(); i++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { max_psum_length = std::max(max_psum_length, pauli_sums[i][j].terms().size()); max_n_shots = std::max(max_n_shots, num_samples[i][j]); @@ -198,12 +198,12 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < ncircuits.size(); i++) { + for (size_t i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; // (#679) Just ignore empty program if (ncircuits[i].channels.empty()) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -230,7 +230,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { sv, unused_stats); // Use this trajectory as a source for all expectation calculations. 
- for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { if (run_samples[j] >= num_samples[i][j]) { continue; } @@ -242,14 +242,14 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { run_samples[j]++; } bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { if (run_samples[j] < num_samples[i][j]) { break_loop = false; break; } } if (break_loop) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) = static_cast(rolling_sums[j]); } @@ -291,8 +291,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { tensorflow::GuardedPhiloxRandom random_gen; int max_psum_length = 1; int max_n_shots = 1; - for (int i = 0; i < pauli_sums.size(); i++) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t i = 0; i < pauli_sums.size(); i++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { max_psum_length = std::max(max_psum_length, pauli_sums[i][j].terms().size()); max_n_shots = std::max(max_n_shots, num_samples[i][j]); @@ -316,13 +316,13 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { auto local_gen = random_gen.ReserveSamples128(num_rand); tensorflow::random::SimplePhilox rand_source(&local_gen); - for (int i = 0; i < ncircuits.size(); i++) { + for (size_t i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; int rep_offset = rep_offsets[start][i]; // (#679) Just ignore empty program if (ncircuits[i].channels.empty()) { - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { (*output_tensor)(i, j) = -2.0; } continue; @@ -349,7 +349,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { sim, sv, unused_stats); // Compute expectations across all ops using this trajectory. - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] >= p_reps + rep_offset) { continue; @@ -366,7 +366,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { // Check if we have run enough trajectories for all ops. bool break_loop = true; - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads; if (run_samples[j] < p_reps + rep_offset) { break_loop = false; @@ -376,7 +376,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel { if (break_loop) { // Lock writing to this batch index in output_tensor. batch_locks[i].lock(); - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { rolling_sums[j] /= num_samples[i][j]; (*output_tensor)(i, j) += static_cast(rolling_sums[j]); } diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc index 78c633f13..a09f826b9 100644 --- a/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc +++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc @@ -160,7 +160,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. 
- for (int i = 0; i < ncircuits.size(); i++) { + for (size_t i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -182,7 +182,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { QTSimulator::RunOnce(param, ncircuits[i], rand_source.Rand64(), ss, sim, sv, gathered_samples); - uint64_t q_ind = 0; + int q_ind = 0; uint64_t mask = 1; bool val = 0; while (q_ind < nq) { @@ -253,7 +253,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { auto local_gen = random_gen.ReserveSamples32(needed_random); tensorflow::random::SimplePhilox rand_source(&local_gen); - for (int i = 0; i < ncircuits.size(); i++) { + for (size_t i = 0; i < ncircuits.size(); i++) { int nq = num_qubits[i]; int j = start > 0 ? offset_prefix_sum[start - 1][i] : 0; int needed_samples = offset_prefix_sum[start][i] - j; @@ -279,7 +279,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel { QTSimulator::RunOnce(param, ncircuits[i], rand_source.Rand64(), ss, sim, sv, gathered_samples); - uint64_t q_ind = 0; + int q_ind = 0; uint64_t mask = 1; bool val = 0; while (q_ind < nq) { diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc index c96a9cb0d..c65e936d6 100644 --- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc +++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc @@ -212,7 +212,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { } ss.SetStateZero(sv); - for (int j = 0; j < full_fuse[i].size(); j++) { + for (size_t j = 0; j < full_fuse[i].size(); j++) { qsim::ApplyFusedGate(sim, full_fuse[i][j], sv); } @@ -241,13 +241,13 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. uint64_t mask = 0; uint64_t cbits = 0; - for (int k = 0; k < cur_gate.controlled_by.size(); k++) { + for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); @@ -307,7 +307,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { auto scratch = ss.Create(largest_nq); auto scratch2 = ss.Create(largest_nq); - for (int i = 0; i < partial_fused_circuits.size(); i++) { + for (size_t i = 0; i < partial_fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -324,7 +324,7 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { } ss.SetStateZero(sv); - for (int j = 0; j < full_fuse[i].size(); j++) { + for (size_t j = 0; j < full_fuse[i].size(); j++) { qsim::ApplyFusedGate(sim, full_fuse[i][j], sv); } @@ -352,13 +352,13 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { // if applicable compute control qubit mask and control value bits. 
uint64_t mask = 0; uint64_t cbits = 0; - for (int k = 0; k < cur_gate.controlled_by.size(); k++) { + for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) { uint64_t control_loc = cur_gate.controlled_by[k]; mask |= uint64_t{1} << control_loc; cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (int k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); diff --git a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc index ace5327e1..4f1f662ca 100644 --- a/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc +++ b/tensorflow_quantum/core/ops/tfq_calculate_unitary_op.cc @@ -116,7 +116,7 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the unitary as nescessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (size_t i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; UCalculator sim = UCalculator(tfq_for); UnitarySpace us = UnitarySpace(tfq_for); @@ -126,7 +126,7 @@ class TfqCalculateUnitaryOp : public tensorflow::OpKernel { u = us.CreateUnitary(nq); } us.SetIdentity(u); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], u); } diff --git a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc index 559fbecc9..d5b4ef9a7 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc +++ b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc @@ -163,12 +163,13 @@ class TfqPsSymbolReplaceOp : public tensorflow::OpKernel { for (int i = start; i < end; i++) { int sidx = i % n_symbols; int pidx = i / n_symbols; - for (int j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) { + + for (size_t j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) { output_tensor(pidx, sidx, j) = output_programs.at(pidx).at(sidx).at(j); } - for (int j = output_programs.at(pidx).at(sidx).size(); j < biggest_pad; - j++) { + for (size_t j = output_programs.at(pidx).at(sidx).size(); + j < biggest_pad; j++) { output_tensor(pidx, sidx, j) = empty_program; } } diff --git a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc index 4a027223e..7ffebfd22 100644 --- a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc +++ b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc @@ -146,7 +146,7 @@ class TfqPsWeightsFromSymbolOp : public tensorflow::OpKernel { auto DoWork2 = [&](int start, int end) { for (int i = start; i < end; i++) { for (int j = 0; j < n_symbols; j++) { - for (int k = 0; k < output_results.at(i).at(j).size(); k++) { + for (size_t k = 0; k < output_results.at(i).at(j).size(); k++) { output_tensor(i, j, k) = output_results.at(i).at(j).at(k); } for (int k = output_results.at(i).at(j).size(); diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc index 7583437ca..6f0561217 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc 
@@ -148,7 +148,7 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (size_t i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -161,10 +161,10 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = -2.0; @@ -226,7 +226,7 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc index b9f9ee982..552644a43 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc @@ -180,7 +180,7 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as necessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (size_t i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -193,10 +193,10 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // the state if there is a possibility that circuit[i] and // circuit[i + 1] produce the same state. ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } - for (int j = 0; j < pauli_sums[i].size(); j++) { + for (size_t j = 0; j < pauli_sums[i].size(); j++) { // (#679) Just ignore empty program if (fused_circuits[i].size() == 0) { (*output_tensor)(i, j) = -2.0; @@ -278,7 +278,7 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel { // no need to update scratch_state since ComputeExpectation // will take care of things for us. 
ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) { + for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv); } } diff --git a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc index 0e68020e9..447f66c70 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_samples_op.cc @@ -154,7 +154,7 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. - for (int i = 0; i < fused_circuits.size(); i++) { + for (size_t i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -163,13 +163,13 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } auto samples = ss.Sample(sv, num_samples, rand_source.Rand32()); for (int j = 0; j < num_samples; j++) { - uint64_t q_ind = 0; + int q_ind = 0; uint64_t mask = 1; bool val = 0; while (q_ind < nq) { @@ -219,13 +219,13 @@ class TfqSimulateSamplesOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } auto samples = ss.Sample(sv, num_samples, rand_source.Rand32()); for (int j = 0; j < num_samples; j++) { - uint64_t q_ind = 0; + int q_ind = 0; uint64_t mask = 1; bool val = 0; while (q_ind < nq) { diff --git a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc index e659800ce..efbfa44fc 100644 --- a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc +++ b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc @@ -136,7 +136,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { // Simulate programs one by one. Parallelizing over state vectors // we no longer parallelize over circuits. Each time we encounter a // a larger circuit we will grow the Statevector as nescessary. 
- for (int i = 0; i < fused_circuits.size(); i++) { + for (size_t i = 0; i < fused_circuits.size(); i++) { int nq = num_qubits[i]; if (nq > largest_nq) { @@ -145,7 +145,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } @@ -194,7 +194,7 @@ class TfqSimulateStateOp : public tensorflow::OpKernel { sv = ss.Create(largest_nq); } ss.SetStateZero(sv); - for (int j = 0; j < fused_circuits[i].size(); j++) { + for (size_t j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv); } diff --git a/tensorflow_quantum/core/src/adj_util.cc b/tensorflow_quantum/core/src/adj_util.cc index ceb76b2c1..e15ff8a8c 100644 --- a/tensorflow_quantum/core/src/adj_util.cc +++ b/tensorflow_quantum/core/src/adj_util.cc @@ -38,7 +38,7 @@ void CreateGradientCircuit( const QsimCircuit& circuit, const std::vector& metadata, std::vector>>* partial_fuses, std::vector* grad_gates) { - for (int i = 0; i < metadata.size(); i++) { + for (size_t i = 0; i < metadata.size(); i++) { if (metadata[i].symbol_values.empty()) { continue; } @@ -78,7 +78,7 @@ void CreateGradientCircuit( // PhasedX else if (circuit.gates[i].kind == qsim::Cirq::GateKind::kPhasedXPowGate) { // Process potentially several symbols. - for (int j = 0; j < metadata[i].symbol_values.size(); j++) { + for (size_t j = 0; j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) { PopulateGradientPhasedXPhasedExponent( @@ -103,7 +103,7 @@ void CreateGradientCircuit( // Process potentially several symbols. bool swapq = circuit.gates[i].swapped; - for (int j = 0; j < metadata[i].symbol_values.size(); j++) { + for (size_t j = 0; j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kTheta) { PopulateGradientFsimTheta( metadata[i].symbol_values[j], i, @@ -128,7 +128,7 @@ void CreateGradientCircuit( qsim::Cirq::GateKind::kPhasedISwapPowGate) { // Process potentially several symbols. 
bool swapq = circuit.gates[i].swapped; - for (int j = 0; j < metadata[i].symbol_values.size(); j++) { + for (size_t j = 0; j < metadata[i].symbol_values.size(); j++) { if (metadata[i].placeholder_names[j] == GateParamNames::kPhaseExponent) { PopulateGradientPhasedISwapPhasedExponent( @@ -159,7 +159,7 @@ void CreateGradientCircuit( partial_fuses->assign(grad_gates->size() + 1, std::vector>({})); - for (int i = 0; i < grad_gates->size(); i++) { + for (size_t i = 0; i < grad_gates->size(); i++) { right = circuit.gates.begin() + (*grad_gates)[i].index; (*partial_fuses)[i] = fuser.FuseGates(qsim::BasicGateFuser::Parameter(), diff --git a/tensorflow_quantum/core/src/util_balance_trajectory.cc b/tensorflow_quantum/core/src/util_balance_trajectory.cc index 6230e747a..8351e49b1 100644 --- a/tensorflow_quantum/core/src/util_balance_trajectory.cc +++ b/tensorflow_quantum/core/src/util_balance_trajectory.cc @@ -29,13 +29,13 @@ void BalanceTrajectory(const std::vector>& num_samples, std::vector rep_limits(num_samples.size(), -1); std::vector height(num_threads, 0); - for (int i = 0; i < num_samples.size(); i++) { - for (int j = 0; j < num_samples[i].size(); j++) { + for (size_t i = 0; i < num_samples.size(); i++) { + for (size_t j = 0; j < num_samples[i].size(); j++) { rep_limits[i] = std::max(rep_limits[i], num_samples[i][j]); } } int prev_max_height = -1; - for (int j = 0; j < num_samples.size(); j++) { + for (size_t j = 0; j < num_samples.size(); j++) { int run_ceiling = ((rep_limits[j] + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - rep_limits[j]; int num_hi = num_threads - num_lo; @@ -74,7 +74,7 @@ void BalanceTrajectory(const int& num_samples, const int& num_threads, std::vector height(num_threads, 0); int prev_max_height = -1; - for (int j = 0; j < (*thread_offsets)[0].size(); j++) { + for (size_t j = 0; j < (*thread_offsets)[0].size(); j++) { int run_ceiling = ((num_samples + num_threads - 1) / num_threads); int num_lo = num_threads * run_ceiling - num_samples; int num_hi = num_threads - num_lo; diff --git a/tensorflow_quantum/core/src/util_balance_trajectory_test.cc b/tensorflow_quantum/core/src/util_balance_trajectory_test.cc index 1656a9acf..f361f5754 100644 --- a/tensorflow_quantum/core/src/util_balance_trajectory_test.cc +++ b/tensorflow_quantum/core/src/util_balance_trajectory_test.cc @@ -24,13 +24,13 @@ static void AssertWellBalanced(const std::vector>& n_reps, const int& num_threads, const std::vector>& offsets) { auto max_work = std::vector(n_reps.size(), -1); - for (int i = 0; i < n_reps.size(); i++) { - for (int j = 0; j < n_reps[0].size(); j++) { + for (size_t i = 0; i < n_reps.size(); i++) { + for (size_t j = 0; j < n_reps[0].size(); j++) { max_work[i] = std::max(max_work[i], n_reps[i][j]); } } - for (int i = 0; i < n_reps.size(); i++) { + for (size_t i = 0; i < n_reps.size(); i++) { int sum = 0; int prev_local_work = 0; for (int k = 0; k < num_threads; k++) { diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h index fc2461acf..6d0bf628c 100644 --- a/tensorflow_quantum/core/src/util_qsim.h +++ b/tensorflow_quantum/core/src/util_qsim.h @@ -427,11 +427,9 @@ tensorflow::Status AccumulateFusedCircuits( tensorflow::Status status = ::tensorflow::Status(); ss.SetAllZeros(dest); - for (std::vector>::size_type i = 0; - i < fused_circuits.size(); i++) { + for (std::vector>::size_type i = 0; i < fused_circuits.size(); i++) { ss.SetStateZero(scratch); - for (std::vector>::size_type j = 0; - j < fused_circuits[i].size(); 
j++) { + for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], scratch); } ss.Multiply(coefficients[i], scratch); From f64f1b340f947a426ff4e0ebe736dacd84790936 Mon Sep 17 00:00:00 2001 From: mhucka Date: Sun, 5 Jan 2025 00:29:45 +0000 Subject: [PATCH 2/5] Fix formatting --- tensorflow_quantum/core/ops/tfq_adj_grad_op.cc | 6 ++++-- tensorflow_quantum/core/src/util_qsim.h | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc index c65e936d6..088c6dcde 100644 --- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc +++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc @@ -247,7 +247,8 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); + k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); @@ -358,7 +359,8 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel { cbits |= ((cur_gate.cmask >> k) & 1) << control_loc; } - for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); k++) { + for (size_t k = 0; k < gradient_gates[i][j - 1].grad_gates.size(); + k++) { // Copy sv onto scratch2 in anticipation of non-unitary "gradient // gate". ss.Copy(sv, scratch2); diff --git a/tensorflow_quantum/core/src/util_qsim.h b/tensorflow_quantum/core/src/util_qsim.h index 6d0bf628c..fc2461acf 100644 --- a/tensorflow_quantum/core/src/util_qsim.h +++ b/tensorflow_quantum/core/src/util_qsim.h @@ -427,9 +427,11 @@ tensorflow::Status AccumulateFusedCircuits( tensorflow::Status status = ::tensorflow::Status(); ss.SetAllZeros(dest); - for (std::vector>::size_type i = 0; i < fused_circuits.size(); i++) { + for (std::vector>::size_type i = 0; + i < fused_circuits.size(); i++) { ss.SetStateZero(scratch); - for (std::vector>::size_type j = 0; j < fused_circuits[i].size(); j++) { + for (std::vector>::size_type j = 0; + j < fused_circuits[i].size(); j++) { qsim::ApplyFusedGate(sim, fused_circuits[i][j], scratch); } ss.Multiply(coefficients[i], scratch); From 60dc18e915b36b0be6eb179228b738d8efa406e2 Mon Sep 17 00:00:00 2001 From: mhucka Date: Fri, 10 Jan 2025 00:33:08 +0000 Subject: [PATCH 3/5] Address a few more size_t versus int warnings --- tensorflow_quantum/core/src/circuit_parser_qsim_test.cc | 6 +++--- tensorflow_quantum/core/src/program_resolution.cc | 4 ++-- tensorflow_quantum/core/src/util_qsim_test.cc | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc index e4d487a85..bdecfe804 100644 --- a/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc +++ b/tensorflow_quantum/core/src/circuit_parser_qsim_test.cc @@ -64,7 +64,7 @@ Arg MakeControlArg(const std::string& val) { } inline void AssertControlEqual(const QsimGate& a, const QsimGate& b) { - for (int i = 0; i < a.controlled_by.size(); i++) { + for (size_t i = 0; i < a.controlled_by.size(); i++) { ASSERT_EQ(a.controlled_by[i], b.controlled_by[i]); } ASSERT_EQ(a.cmask, b.cmask); @@ -89,14 +89,14 @@ inline void AssertOneQubitEqual(const QsimGate& a, const QsimGate& b) { inline void AssertChannelEqual(const QsimChannel& a, const QsimChannel& b) { ASSERT_EQ(a.size(), b.size()); - for (int i = 
0; i < a.size(); i++) { + for (size_t i = 0; i < a.size(); i++) { ASSERT_EQ(a[i].kind, b[i].kind); ASSERT_EQ(a[i].unitary, b[i].unitary); ASSERT_NEAR(a[i].prob, b[i].prob, 1e-5); auto a_k_ops = a[i].ops; auto b_k_ops = b[i].ops; EXPECT_EQ(a_k_ops.size(), b_k_ops.size()); - for (int j = 0; j < a_k_ops.size(); j++) { + for (size_t j = 0; j < a_k_ops.size(); j++) { AssertOneQubitEqual(a_k_ops[j], b_k_ops[j]); } } diff --git a/tensorflow_quantum/core/src/program_resolution.cc b/tensorflow_quantum/core/src/program_resolution.cc index 0fbda9368..0e9ef28c2 100644 --- a/tensorflow_quantum/core/src/program_resolution.cc +++ b/tensorflow_quantum/core/src/program_resolution.cc @@ -361,7 +361,7 @@ Status CheckMPSSupported(const Program& program) { control_ids = absl::StrSplit(control_qubits, ','); } } - const int total_num_qubits = qubits.size() + control_ids.size(); + const size_t total_num_qubits = qubits.size() + control_ids.size(); if (total_num_qubits > 2) { return Status( static_cast( @@ -372,7 +372,7 @@ Status CheckMPSSupported(const Program& program) { } if (total_num_qubits == 2) { - int j = 0; + size_t j = 0; std::vector qids(2, -1234); for (; j < qubits.size(); j++) { (void)absl::SimpleAtoi(qubits[j].id(), &qids[j]); diff --git a/tensorflow_quantum/core/src/util_qsim_test.cc b/tensorflow_quantum/core/src/util_qsim_test.cc index 7e51a6275..74b6f7e32 100644 --- a/tensorflow_quantum/core/src/util_qsim_test.cc +++ b/tensorflow_quantum/core/src/util_qsim_test.cc @@ -376,7 +376,7 @@ TEST(UtilQsimTest, ApplyGateDagger) { for (const auto& gate : simple_circuit.gates) { qsim::ApplyGate(sim, gate, sv); } - for (int i = simple_circuit.gates.size() - 1; i >= 0; i--) { + for (size_t i = simple_circuit.gates.size() - 1; i >= 0; i--) { ApplyGateDagger(sim, simple_circuit.gates[i], sv); } From 8a0905a95dea1dd0fc291c424f1a9a8d5e379dd7 Mon Sep 17 00:00:00 2001 From: mhucka Date: Fri, 10 Jan 2025 00:34:52 +0000 Subject: [PATCH 4/5] Fix miss 'const' declarations Although the compiler didn't flag them, a couple of declarations that were changed to size_t in a previous commit should probably have const declarations, to be consistent with other changes in the same function. --- .../core/ops/math_ops/tfq_inner_product_grad.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc index b7b5a34aa..1563ef37a 100644 --- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc +++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc @@ -61,9 +61,9 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel { "other_programs must be rank 2. Got ", context->input(3).dims()))); // Create the output Tensor. 
- const int output_dim_batch_size = context->input(0).dim_size(0); - size_t output_dim_internal_size = context->input(3).dim_size(1); - size_t output_dim_symbol_size = context->input(1).dim_size(0); + const size_t output_dim_batch_size = context->input(0).dim_size(0); + const size_t output_dim_internal_size = context->input(3).dim_size(1); + const size_t output_dim_symbol_size = context->input(1).dim_size(0); OP_REQUIRES(context, output_dim_symbol_size > 0, tensorflow::errors::InvalidArgument(absl::StrCat( "The number of symbols must be a positive integer, got ", From c989fdf39553dbe73b52fc23f8e62bc66c20ac37 Mon Sep 17 00:00:00 2001 From: mhucka Date: Fri, 10 Jan 2025 17:05:46 +0000 Subject: [PATCH 5/5] Revert a change from commit f64f1b3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One of the `size_t` → `int` changes was wrong and led to a test failure. I would prefer to edit the commit itself; however, that would require force-pushing to the TFQ repo, and I'm not sure if that will break the code review comments in the PR in GitHub. So I'm making the change in a separate commit. --- tensorflow_quantum/core/src/util_qsim_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_quantum/core/src/util_qsim_test.cc b/tensorflow_quantum/core/src/util_qsim_test.cc index d6793b25f..e6d450182 100644 --- a/tensorflow_quantum/core/src/util_qsim_test.cc +++ b/tensorflow_quantum/core/src/util_qsim_test.cc @@ -376,7 +376,7 @@ TEST(UtilQsimTest, ApplyGateDagger) { for (const auto& gate : simple_circuit.gates) { qsim::ApplyGate(sim, gate, sv); } - for (size_t i = simple_circuit.gates.size() - 1; i >= 0; i--) { + for (int i = simple_circuit.gates.size() - 1; i >= 0; i--) { ApplyGateDagger(sim, simple_circuit.gates[i], sv); }
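A note on the final revert above, since the commit message only records that the change "led to a test failure": the loop in `util_qsim_test.cc` counts downward, and `size_t` is unsigned, so the condition `i >= 0` can never become false; once `i` reaches 0, the decrement wraps around to a huge value and the next `simple_circuit.gates[i]` access is out of bounds. Keeping the index as a signed `int`, as PATCH 5/5 does, is the straightforward fix. The standalone sketch below is illustrative only (it is not part of any patch in this series) and shows one common idiom for iterating in reverse with an unsigned index, in case the signed/unsigned warning ever needs to be avoided here as well:

    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<int> gates = {1, 2, 3};

      // What PATCH 3/5 introduced and PATCH 5/5 reverts: with an unsigned
      // index, `i >= 0` is always true, so after visiting gates[0] the
      // decrement wraps around and gates[i] reads out of bounds.
      //
      //   for (size_t i = gates.size() - 1; i >= 0; i--) { ... }

      // A warning-free alternative: count down from size() and index with
      // i - 1, so the loop condition stays well defined for unsigned types.
      for (size_t i = gates.size(); i > 0; i--) {
        (void)gates[i - 1];  // visits elements 2, 1, 0 in reverse order
      }
      return 0;
    }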
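The message for PATCH 1/5 also mentions suppressing warnings that arise in other packages (e.g., `-Wno-unused-function`) via Bazel's `--per_file_copt` and `--host_per_file_copt` rather than patching third-party code, with the flags to be wired into the TFQ `configure.sh` script later. Those flags are not part of this series; as a rough sketch of the intended shape only — the package pattern below is a placeholder, not an actual TFQ dependency — per-file options narrowed to a single external package would look roughly like this in a generated `.bazelrc`:

    # Disable the unused-function warning only for sources under one
    # problematic external package, instead of disabling it globally.
    build --per_file_copt=external/some_problematic_package/.*@-Wno-unused-function
    build --host_per_file_copt=external/some_problematic_package/.*@-Wno-unused-function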