From db31126e116810aaaff7c265b61abf67edc43f4d Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 1 Sep 2023 16:32:23 +0200 Subject: [PATCH] lint c++ --- onnxruntime/contrib_ops/cuda/math/gemm_float8.cc | 1 + onnxruntime/contrib_ops/cuda/math/gemm_float8.cu | 3 ++- onnxruntime/contrib_ops/cuda/math/gemm_float8.h | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/onnxruntime/contrib_ops/cuda/math/gemm_float8.cc b/onnxruntime/contrib_ops/cuda/math/gemm_float8.cc index 1ec94796fd71..b413b6af5e99 100644 --- a/onnxruntime/contrib_ops/cuda/math/gemm_float8.cc +++ b/onnxruntime/contrib_ops/cuda/math/gemm_float8.cc @@ -7,6 +7,7 @@ #include "core/providers/cuda/shared_inc/fpgeneric.h" #include "core/providers/cpu/math/gemm_helper.h" #include "gemm_float8.h" +#include using namespace ONNX_NAMESPACE; diff --git a/onnxruntime/contrib_ops/cuda/math/gemm_float8.cu b/onnxruntime/contrib_ops/cuda/math/gemm_float8.cu index 3384888ccd09..ce05450d8859 100644 --- a/onnxruntime/contrib_ops/cuda/math/gemm_float8.cu +++ b/onnxruntime/contrib_ops/cuda/math/gemm_float8.cu @@ -5,6 +5,7 @@ #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_utils.h" #include +#include #include namespace onnxruntime { @@ -379,7 +380,7 @@ Status GemmFloat8::ComputeGemm( // The workspace should be allocated once from OpKernelContext assuming // only one cuda function is running at a time (which is not necessarily true // with H100). 
- size_t workspaceSize = (size_t)(1 << 25); // suggested fixed value 32Mb + size_t workspaceSize = static_cast<size_t>(1 << 25); // suggested fixed value 32Mb cublasLtMatmulPreference_t preference = nullptr; cublasLtMatmulPreferenceCreate(&preference); cublasLtMatmulPreferenceSetAttribute(preference, diff --git a/onnxruntime/contrib_ops/cuda/math/gemm_float8.h b/onnxruntime/contrib_ops/cuda/math/gemm_float8.h index d12e177411fa..98c9b6f59f12 100644 --- a/onnxruntime/contrib_ops/cuda/math/gemm_float8.h +++ b/onnxruntime/contrib_ops/cuda/math/gemm_float8.h @@ -60,7 +60,7 @@ class GemmFloat8 final : public onnxruntime::cuda::CudaKernel { cublasComputeType_t compute_type_; cublasLtEpilogue_t epilogue_; - // TODO: add epilogue (= activation function, Relu or Gelu are available). + // TODO(xadupre): add epilogue (= activation function, Relu or Gelu are available). }; } // namespace cuda