Commit 354cdeb

fixes for Glow
Reviewed By: jfix71

Differential Revision: D57075318
r-barnes authored and facebook-github-bot committed May 11, 2024
1 parent 0b05ae6 commit 354cdeb
Showing 13 changed files with 52 additions and 47 deletions.
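
Every hunk below follows the same mechanical mapping from LLVM's `Optional` (since removed from LLVM in favor of the standard type) to `std::optional`. A self-contained sketch of the correspondence — the `find` function here is illustrative only, not part of this diff:

```cpp
#include <optional>

// llvm::Optional<T> x;          ->  std::optional<T> x;   (both start empty)
// x.hasValue()                  ->  x.has_value(), or simply: if (x) ...
// x.getValue()                  ->  x.value(), or *x
// return llvm::Optional<T>();   ->  return std::nullopt;

std::optional<int> find(bool present) {
  if (!present) {
    return std::nullopt; // was: return llvm::Optional<int>();
  }
  return 42;
}

int main() {
  std::optional<int> v = find(true);
  if (v.has_value()) { // was: v.hasValue()
    return v.value();  // was: v.getValue()
  }
  return 0;
}
```
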
6 changes: 3 additions & 3 deletions include/glow/LLVMIRCodeGen/LLVMBackend.h
@@ -44,7 +44,7 @@ class LLVMBackendOptions {
/// ABI to be used by this backend.
std::string abi_;
/// Float ABI to be used by this backend.
- llvm::Optional<llvm::FloatABI::ABIType> floatABI_;
+ std::optional<llvm::FloatABI::ABIType> floatABI_;
/// Code model used by this backend.
llvm::CodeModel::Model codeModel_;
/// Code model used by this backend for bundles.
@@ -75,11 +75,11 @@ class LLVMBackendOptions {
/// Sets ABI used by this backend.
void setABIName(llvm::StringRef abi) { abi_ = abi.str(); }
/// \returns Float ABI used by this backend.
- llvm::Optional<llvm::FloatABI::ABIType> getFloatABI() const {
+ std::optional<llvm::FloatABI::ABIType> getFloatABI() const {
return floatABI_;
}
/// Sets Float ABI used by this backend.
- void setFloatABI(llvm::Optional<llvm::FloatABI::ABIType> floatABI) {
+ void setFloatABI(std::optional<llvm::FloatABI::ABIType> floatABI) {
floatABI_ = floatABI;
}
/// \returns code model used by this backend.
8 changes: 5 additions & 3 deletions include/glow/Optimizer/GraphOptimizer/CompilationContext.h
@@ -21,6 +21,8 @@
#include "glow/Quantization/Base/Base.h"
#include "glow/Support/Error.h"

+ #include <optional>

namespace glow {
namespace runtime {
struct PartitionConfig;
@@ -273,7 +275,7 @@ struct OptimizationOptions {
/// If it is true (false), perform (not perform) ASAP op placement in DAG
/// optimization; If it is not set, use acc perf GFlag APLASAPPlacement to
/// determine whether to perform ASAP op placement or not
- llvm::Optional<bool> enableAPLASAPPlacement;
+ std::optional<bool> enableAPLASAPPlacement;

/// If true does int64 to int32 type demotion if backend supports for specific
/// nodes.
@@ -311,8 +313,8 @@ struct OptimizationOptions {
PRINT_VALUE(foldElemKindConversionIntoIO, dump_str)
PRINT_VALUE(foldStaticPlaceholderConversions, dump_str)
PRINT_VALUE(useSparseNNPartitioningScheme, dump_str)
- if (enableAPLASAPPlacement.hasValue()) {
- PRINT_VALUE(enableAPLASAPPlacement.getValue(), dump_str)
+ if (enableAPLASAPPlacement) {
+ PRINT_VALUE(enableAPLASAPPlacement.value(), dump_str)
}
PRINT_VALUE(enableTypeDemotion, dump_str)
PRINT_VALUE(enableQuantParamChanges, dump_str)
4 changes: 2 additions & 2 deletions include/glow/Support/TensorPool.h
@@ -17,11 +17,11 @@
#define GLOW_TENSORPOOL_H

#include "glow/Base/Tensor.h"
#include "llvm/ADT/Optional.h"

#include <atomic>
#include <iostream>
#include <mutex>
+ #include <optional>
#include <unordered_map>
#include <vector>

@@ -74,7 +74,7 @@ class TensorPool final {
/// previously been added by initialize. If the pool is empty this will
/// allocate a new Tensor unless preventAllocs was set true at construction
/// time.
- llvm::Optional<Tensor> get(TypeRef ty);
+ std::optional<Tensor> get(TypeRef ty);

/// Return a Tensor \p t to the pool. This Tensor must have been previously
/// allocated by this TensorPool.
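
With the new return type, callers test the optional directly and move the `Tensor` out of it — a short usage sketch, assuming the `reserve`/`get`/`reclaim` flow exercised by the tests in this commit:

```cpp
#include "glow/Support/TensorPool.h"

using namespace glow;

void example() {
  TensorPool pool;
  Type ty(ElemKind::FloatTy, {1, 2, 3});
  pool.reserve(&ty, 1);

  // get() returns an empty optional only when the pool was constructed
  // with preventAllocs set and no reserved tensor is available.
  if (std::optional<Tensor> t = pool.get(&ty)) {
    Tensor tensor = std::move(t.value());
    // ... use tensor ...
    pool.reclaim(std::move(tensor)); // hand the buffer back to the pool
  }
}
```
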
4 changes: 2 additions & 2 deletions lib/Backends/Interpreter/InterpreterNodes.cpp
@@ -1476,7 +1476,7 @@ static void fwdMaxPool(Tensor *inW, Tensor *outW, Tensor *argmaxW,
ShapeHW kdim(kernelSizes);
ShapeHW sdim(strides);

- llvm::Optional<Handle<int64_t>> argmaxH;
+ std::optional<Handle<int64_t>> argmaxH;
if (argmaxW) {
argmaxH = argmaxW->getHandle<int64_t>();
}
@@ -6678,7 +6678,7 @@ void BoundInterpreterFunction::fwdIntNBitSplitEmbeddingWeightedBagsImpl(
auto weightsTysH = weightsTys->getHandle<uint8_t>();
auto dimOffsetsH = dimOffsets->getHandle<int32_t>();
auto weightsOffsetsH = weightsOffsets->getHandle<WeightsOffsetTy>();
- llvm::Optional<Handle<IndiceWeightTy>> indiceWeightsH;
+ std::optional<Handle<IndiceWeightTy>> indiceWeightsH;
if (indiceWeights) {
indiceWeightsH = indiceWeights->getHandle<IndiceWeightTy>();
}
1 change: 1 addition & 0 deletions lib/CodeGen/MemoryAllocator.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "memory-allocator"
1 change: 1 addition & 0 deletions lib/Graph/Log.cpp
@@ -20,6 +20,7 @@
#include "glow/Graph/Graph.h"
#include "glow/Graph/Node.h"
#include "glow/Graph/NodeValue.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
4 changes: 2 additions & 2 deletions lib/LLVMIRCodeGen/LLVMIRGen.cpp
@@ -99,8 +99,8 @@ static std::mutex initTargetMutex;

void LLVMIRGen::initTargetOptions(llvm::TargetOptions &targetOpts,
const LLVMBackendOptions &backendOpts) {
- if (backendOpts.getFloatABI().hasValue()) {
- targetOpts.FloatABIType = backendOpts.getFloatABI().getValue();
+ if (backendOpts.getFloatABI().has_value()) {
+ targetOpts.FloatABIType = backendOpts.getFloatABI().value();
}
if (!backendOpts.getABIName().empty()) {
targetOpts.MCOptions.ABIName = backendOpts.getABIName();
7 changes: 3 additions & 4 deletions lib/Onnxifi/Base.cpp
@@ -347,15 +347,14 @@ onnxStatus Graph::adjustInputs(uint32_t inputsCount,
continue;
}

- llvm::Optional<Tensor> inputTensorOpt = tensorPool_.get(inPhPtr->getType());
- if (!inputTensorOpt.hasValue()) {
+ std::optional<Tensor> inputTensorOpt = tensorPool_.get(inPhPtr->getType());
+ if (!inputTensorOpt.has_value()) {
DLOG(FATAL) << "Tensorpool tensor not found for input "
<< inOnnxTensor.name;
return ONNXIFI_STATUS_INTERNAL_ERROR;
}
// We want fresh DeviceResidencyInfo for this fresh Tensor.
- externalIOBindings.emplace_back(inPhPtr,
- std::move(inputTensorOpt.getValue()));
+ externalIOBindings.emplace_back(inPhPtr, std::move(inputTensorOpt.value()));
Tensor &inputTensor = externalIOBindings.back().second;
inputTensor.resetDeviceInfo();

6 changes: 3 additions & 3 deletions lib/Optimizer/GraphOptimizer/GraphOptimizer.cpp
@@ -5474,9 +5474,9 @@ struct ChannelShuffleParams {
/// as ReshapeNode->TransposeNode->ReshapeNode) for which \p node is the leading
/// ReshapeNode. \returns The original ChannelShuffle parameters if possible and
/// empty Optional otherwise.
- static llvm::Optional<ChannelShuffleParams>
+ static std::optional<ChannelShuffleParams>
getChannelShuffleParams(const ReshapeNode &node) {
- auto resM = llvm::Optional<ChannelShuffleParams>();
+ std::optional<ChannelShuffleParams> resM;

llvm::ArrayRef<dim_t> inputDims = node.getInput().dims();
llvm::ArrayRef<dim_t> resultDims = node.getDims();
@@ -5539,7 +5539,7 @@ bool FoldChannelShuffle::run(Function *F, const CompilationContext &cctx) {

// Compute the original parameters to ChannelShuffle.
auto paramsM = getChannelShuffleParams(*RN1);
- if (!paramsM.hasValue()) {
+ if (!paramsM.has_value()) {
continue;
}

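
Worth noting on the `getChannelShuffleParams` hunk: a default-constructed `std::optional` is disengaged, exactly like a default-constructed `llvm::Optional`, so the plain declaration of `resM` preserves the empty-on-failure return path. A tiny self-contained check, with a stand-in struct since `ChannelShuffleParams`'s fields are not shown in this diff:

```cpp
#include <cassert>
#include <optional>

struct Params { unsigned group; }; // stand-in for ChannelShuffleParams

std::optional<Params> maybeParams(bool found) {
  std::optional<Params> resM; // starts empty, like llvm::Optional<Params>()
  if (found) {
    resM = Params{4};
  }
  return resM;
}

int main() {
  assert(!maybeParams(false).has_value());
  assert(maybeParams(true).has_value());
  return 0;
}
```
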
6 changes: 3 additions & 3 deletions lib/Runtime/HostManager/HostManager.cpp
@@ -901,7 +901,7 @@ Error HostManager::runNetworkBlocking(
}

void HostManager::dispatchNextRun() {
- llvm::Optional<InferRequest> pRequest;
+ std::optional<InferRequest> pRequest;
std::shared_lock<std::shared_timed_mutex> networkLock(networkLock_);
{
// hmm this lock is hot but I still have it as a unique lock because
@@ -921,8 +921,8 @@ void HostManager::dispatchNextRun() {
}
}

- assert(pRequest.hasValue());
- InferRequest request = std::move(pRequest.getValue());
+ assert(pRequest.has_value());
+ InferRequest request = std::move(pRequest.value());
auto startTime = TraceEvent::now();
auto requestReceived = request.startTime;
executor_->run(
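
One subtlety in `dispatchNextRun`: `std::move(pRequest.value())` moves the payload out but leaves the optional itself engaged (now holding a moved-from object), so the preceding assert checks engagement, not that the payload survives. A self-contained sketch with `std::string` standing in for `InferRequest`:

```cpp
#include <cassert>
#include <optional>
#include <string>
#include <utility>

int main() {
  std::optional<std::string> pRequest{"inference request"};
  assert(pRequest.has_value()); // must be engaged before calling value()

  std::string request = std::move(pRequest.value()); // payload moved out
  assert(pRequest.has_value()); // still engaged; holds a moved-from string
  return static_cast<int>(request.size());
}
```
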
8 changes: 5 additions & 3 deletions lib/Support/TensorPool/TensorPool.cpp
@@ -16,9 +16,11 @@

#include "glow/Support/TensorPool.h"

+ #include <optional>

namespace glow {

- llvm::Optional<Tensor> TensorPool::get(TypeRef ty) {
+ std::optional<Tensor> TensorPool::get(TypeRef ty) {
stats_.totalGets++;

std::unique_lock<std::mutex> l(lock_);
@@ -27,7 +29,7 @@ llvm::Optional<Tensor> TensorPool::get(TypeRef ty) {

if (it == pools_.end()) {
if (preventInlineAllocs_) {
- return llvm::Optional<Tensor>();
+ return std::nullopt;
}

stats_.totalTypes++;
@@ -36,7 +38,7 @@ llvm::Optional<Tensor> TensorPool::get(TypeRef ty) {

if (it->second.empty()) {
if (preventInlineAllocs_) {
- return llvm::Optional<Tensor>();
+ return std::nullopt;
}

// Don't need to alloc under the lock.
40 changes: 20 additions & 20 deletions tests/unittests/TensorPoolTest.cpp
@@ -31,7 +31,7 @@ TEST(TensorPool, BasicTest) {
Type ty(ElemKind::FloatTy, {1, 2, 3});
pool.reserve(&ty, 1);

- Tensor T = std::move(pool.get(&ty).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
EXPECT_TRUE(T.getType().isEqual(ty));
EXPECT_EQ(T.dims(), ty.dims());

@@ -52,12 +52,12 @@ TEST(TensorPool, ReclaimAndGet) {
Type ty(ElemKind::FloatTy, {1, 2, 3});
pool.reserve(&ty, 1);

- Tensor T = std::move(pool.get(&ty).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
auto *backingPtr = T.getUnsafePtr();

pool.reclaim(std::move(T));

- Tensor T2 = std::move(pool.get(&ty).getValue());
+ Tensor T2 = std::move(pool.get(&ty).value());
// They are the same buffer.
EXPECT_EQ(T2.getUnsafePtr(), backingPtr);

@@ -78,8 +78,8 @@ TEST(TensorPool, Extends) {
Type ty(ElemKind::FloatTy, {1, 2, 3});
pool.reserve(&ty, 1);

- Tensor T = std::move(pool.get(&ty).getValue());
- Tensor T2 = std::move(pool.get(&ty).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
+ Tensor T2 = std::move(pool.get(&ty).value());
EXPECT_TRUE(T.getType().isEqual(T2.getType()));
EXPECT_TRUE(T.getType().isEqual(ty));
EXPECT_TRUE(T2.getType().isEqual(ty));
@@ -105,15 +105,15 @@ TEST(TensorPool, DoesntExtend) {
Type ty(ElemKind::FloatTy, {1, 2, 3});
pool.reserve(&ty, 1);

- Tensor T = std::move(pool.get(&ty).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
Type Tt = T.getType();

auto T2opt = pool.get(&ty);
- EXPECT_FALSE(T2opt.hasValue());
+ EXPECT_FALSE(T2opt.has_value());

pool.reclaim(std::move(T));

- T = std::move(pool.get(&ty).getValue());
+ T = std::move(pool.get(&ty).value());
EXPECT_EQ(Tt, T.getType());

const auto &stats = pool.getStats();
@@ -132,8 +132,8 @@ TEST(TensorPool, Noreserve) {
TensorPool pool;
Type ty(ElemKind::FloatTy, {1, 2, 3});

- Tensor T = std::move(pool.get(&ty).getValue());
- Tensor T2 = std::move(pool.get(&ty).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
+ Tensor T2 = std::move(pool.get(&ty).value());

EXPECT_TRUE(T.getType().isEqual(T2.getType()));

Expand Down Expand Up @@ -162,8 +162,8 @@ TEST(TensorPool, MultipleTypes) {
std::vector<Tensor> tensors;
// Ten total allocs.
for (int i = 0; i < 5; ++i) {
- Tensor T = std::move(pool.get(&ty).getValue());
- Tensor T2 = std::move(pool.get(&ty2).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
+ Tensor T2 = std::move(pool.get(&ty2).value());
EXPECT_FALSE(T.getType().isEqual(T2.getType()));
EXPECT_TRUE(T.getType().isEqual(ty));
EXPECT_TRUE(T2.getType().isEqual(ty2));
@@ -200,14 +200,14 @@ TEST(TensorPool, MultipleTypesReclaim) {
pool.reserve(&ty, 1);
pool.reserve(&ty2, 1);

- Tensor T = std::move(pool.get(&ty).getValue());
- Tensor T2 = std::move(pool.get(&ty2).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
+ Tensor T2 = std::move(pool.get(&ty2).value());

pool.reclaim(std::move(T));
pool.reclaim(std::move(T2));

- T = std::move(pool.get(&ty).getValue());
- T2 = std::move(pool.get(&ty2).getValue());
+ T = std::move(pool.get(&ty).value());
+ T2 = std::move(pool.get(&ty2).value());

pool.reclaim(std::move(T));
pool.reclaim(std::move(T2));
@@ -231,7 +231,7 @@ TEST(TensorPool, PlaceholderBindingsReclaim) {
Module mod;

auto *PH = mod.createPlaceholder(&ty, "test", false);
- bindings.insert(PH, std::move(pool.get(&ty).getValue()));
+ bindings.insert(PH, std::move(pool.get(&ty).value()));

/// Insert a non managed tensor.
auto *PH2 = mod.createPlaceholder(&ty, "test2", false);
@@ -249,7 +249,7 @@
EXPECT_EQ(stats.totalGets, 1);
EXPECT_EQ(stats.totalReclaims, 1);

- bindings.insert(PH, std::move(pool.get(&ty).getValue()));
+ bindings.insert(PH, std::move(pool.get(&ty).value()));

bindings.erase(PH);
const auto &stats2 = pool.getStats();
@@ -263,7 +263,7 @@ TEST(TensorPool, Clear) {
TensorPool pool;
Type ty(ElemKind::FloatTy, {1, 2, 3});

- Tensor T = std::move(pool.get(&ty).getValue());
+ Tensor T = std::move(pool.get(&ty).value());
pool.reclaim(std::move(T));

const auto &stats = pool.getStats();
@@ -277,7 +277,7 @@

pool.clear();

- T = std::move(pool.get(&ty).getValue());
+ T = std::move(pool.get(&ty).value());
pool.reclaim(std::move(T));

const auto &stats2 = pool.getStats();
4 changes: 2 additions & 2 deletions torch_glow/src/CachingGraphRunner.cpp
@@ -913,13 +913,13 @@ CachingGraphRunner::convertPyTorchInputToGlowInput(
// For backends that does not support partial tensor, last-element padding
// based on size
auto inputTensorOpt = tensorPool_.get(ty);
- if (!inputTensorOpt.hasValue()) {
+ if (!inputTensorOpt) {
std::stringstream ss;
ss << "Tensorpool tensor not found for input " << ptTensor.name();
return MAKE_ERR(ss.str());
}
// We want fresh DeviceResidencyInfo for this fresh Tensor.
- glow::Tensor inputTensor(std::move(inputTensorOpt.getValue()));
+ glow::Tensor inputTensor(std::move(inputTensorOpt.value()));
inputTensor.resetDeviceInfo();
if (ptTensor.data_ptr()) {
auto *inTensorPtr = inputTensor.getUnsafePtr();
