s/exec_aten::/executorch::aten::/ for extension/**/*.h (#6106)
s/exec_aten::/executorch::aten::/ for extension/**/*.h (#6032)

Summary:
Pull Request resolved: #6032

Migrate all extension headers to use the new aten namespace, so that they act as good examples for users. The .cpp code can migrate later.

Reviewed By: lucylq

Differential Revision: D64079593

fbshipit-source-id: 62164cd6ec3238e21e896813f185d0c1446ef527
(cherry picked from commit 69c2c76)

Co-authored-by: Dave Bort <dbort@meta.com>
pytorchbot and dbort authored Oct 11, 2024
1 parent 0b82b17 commit 99827e4
Showing 10 changed files with 272 additions and 253 deletions.
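Before the per-file diffs, here is a minimal sketch of what the rename means for code that includes these extension headers. It assumes only that executorch::aten now spells the same ATen-compatible types previously reached through exec_aten, as the commit title states; the include path and the scalar_type() accessor are illustrative assumptions, not part of this commit.

// Sketch only: header path assumed from the usual ExecuTorch core layout.
#include <executorch/runtime/core/exec_aten/exec_aten.h>

// Old spelling, being phased out of the extension headers:
//   exec_aten::Tensor t;
//   exec_aten::ScalarType s = exec_aten::ScalarType::Float;

// New spelling used throughout this commit:
void describe(const executorch::aten::Tensor& t) {
  // Same underlying type as before; only the namespace spelling changes.
  executorch::aten::ScalarType dtype = t.scalar_type();
  (void)dtype;
}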
2 changes: 1 addition & 1 deletion extension/android/jni/jni_layer_constants.h
@@ -37,7 +37,7 @@ constexpr static int kTensorDTypeBits4x2 = 20;
constexpr static int kTensorDTypeBits8 = 21;
constexpr static int kTensorDTypeBits16 = 22;

-using exec_aten::ScalarType;
+using executorch::aten::ScalarType;

const std::unordered_map<ScalarType, int> scalar_type_to_java_dtype = {
{ScalarType::Byte, kTensorDTypeUInt8},
25 changes: 13 additions & 12 deletions extension/kernel_util/make_boxed_from_unboxed_functor.h
@@ -66,12 +66,12 @@ struct decay_if_not_tensor final {
using type = std::decay_t<T>;
};
template <>
-struct decay_if_not_tensor<exec_aten::Tensor&> final {
-using type = exec_aten::Tensor&;
+struct decay_if_not_tensor<executorch::aten::Tensor&> final {
+using type = executorch::aten::Tensor&;
};
template <>
-struct decay_if_not_tensor<const exec_aten::Tensor&> final {
-using type = const exec_aten::Tensor&;
+struct decay_if_not_tensor<const executorch::aten::Tensor&> final {
+using type = const executorch::aten::Tensor&;
};

template <class T>
@@ -82,29 +82,30 @@ struct evalue_to_arg final {
};

template <>
-struct evalue_to_arg<exec_aten::Tensor&> final {
-static exec_aten::Tensor& call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<executorch::aten::Tensor&> final {
+static executorch::aten::Tensor& call(executorch::runtime::EValue& v) {
return v.toTensor();
}
};

template <>
-struct evalue_to_arg<const exec_aten::Tensor&> final {
-static const exec_aten::Tensor& call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<const executorch::aten::Tensor&> final {
+static const executorch::aten::Tensor& call(executorch::runtime::EValue& v) {
return v.toTensor();
}
};

template <class T>
-struct evalue_to_arg<exec_aten::optional<T>> final {
-static exec_aten::optional<T> call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<executorch::aten::optional<T>> final {
+static executorch::aten::optional<T> call(executorch::runtime::EValue& v) {
return v.toOptional<T>();
}
};

template <class T>
-struct evalue_to_arg<exec_aten::ArrayRef<exec_aten::optional<T>>> final {
-static exec_aten::ArrayRef<exec_aten::optional<T>> call(
+struct evalue_to_arg<executorch::aten::ArrayRef<executorch::aten::optional<T>>>
+final {
+static executorch::aten::ArrayRef<executorch::aten::optional<T>> call(
executorch::runtime::EValue& v) {
return v.toListOptionalTensor();
}
2 changes: 1 addition & 1 deletion extension/llm/runner/image_prefiller.h
@@ -30,7 +30,7 @@ class ImagePrefiller {
* It's passed as reference and will be updated inside this function.
* @return The next token of the LLM Module after prefill.
*/
-virtual ::executorch::runtime::Result<exec_aten::Tensor> prefill(
+virtual ::executorch::runtime::Result<executorch::aten::Tensor> prefill(
Image& image,
int64_t& start_pos) = 0;

5 changes: 3 additions & 2 deletions extension/llm/runner/text_decoder_runner.h
@@ -37,7 +37,7 @@ class TextDecoderRunner {
* Module.
* @return The output of the LLM Module. This will be a tensor of logits.
*/
-virtual ::executorch::runtime::Result<exec_aten::Tensor> step(
+virtual ::executorch::runtime::Result<executorch::aten::Tensor> step(
TensorPtr& input,
TensorPtr& start_pos);

@@ -66,7 +66,8 @@ class TextDecoderRunner {
* @param logits_tensor The logits tensor.
* @return The next token.
*/
-inline int32_t logits_to_token(const exec_aten::Tensor& logits_tensor) {
+inline int32_t logits_to_token(
+const executorch::aten::Tensor& logits_tensor) {
int32_t result = 0;
ET_SWITCH_THREE_TYPES(
Float,
11 changes: 6 additions & 5 deletions extension/llm/runner/text_token_generator.h
@@ -53,7 +53,7 @@ class TextTokenGenerator {
int64_t pos = start_pos; // position in the sequence

std::vector<uint64_t> token_data; // allocate space for the tokens
-std::vector<exec_aten::SizesType> token_shape;
+std::vector<executorch::aten::SizesType> token_shape;

// Token after prefill
uint64_t cur_token = tokens.back();
@@ -70,10 +70,11 @@
}

// initialize tensor wrappers
-auto tokens_managed =
-from_blob(token_data.data(), token_shape, exec_aten::ScalarType::Long);
+auto tokens_managed = from_blob(
+token_data.data(), token_shape, executorch::aten::ScalarType::Long);

-auto start_pos_managed = from_blob(&pos, {1}, exec_aten::ScalarType::Long);
+auto start_pos_managed =
+from_blob(&pos, {1}, executorch::aten::ScalarType::Long);

// Generate our tokens
while (pos < seq_len - 1) {
@@ -82,7 +83,7 @@
text_decoder_runner_->step(tokens_managed, start_pos_managed);

ET_CHECK_OK_OR_RETURN_ERROR(logits_res.error());
-exec_aten::Tensor& logits_tensor = logits_res.get();
+executorch::aten::Tensor& logits_tensor = logits_res.get();

prev_token = cur_token;

98 changes: 52 additions & 46 deletions extension/tensor/tensor_impl_ptr.h
@@ -30,7 +30,7 @@ namespace extension {
* It serves as a safer, more convenient alternative to the original TensorImpl,
* which does not manage its metadata by design.
*/
-using TensorImplPtr = std::shared_ptr<exec_aten::TensorImpl>;
+using TensorImplPtr = std::shared_ptr<executorch::aten::TensorImpl>;
#else
/**
* A smart pointer type for managing the lifecycle of a TensorImpl.
@@ -40,7 +40,7 @@ using TensorImplPtr = std::shared_ptr<exec_aten::TensorImpl>;
* metadata.
*/
using TensorImplPtr =
-c10::intrusive_ptr<exec_aten::TensorImpl, at::UndefinedTensorImpl>;
+c10::intrusive_ptr<executorch::aten::TensorImpl, at::UndefinedTensorImpl>;
#endif // USE_ATEN_LIB

/**
@@ -59,13 +59,13 @@ using TensorImplPtr =
* @return A TensorImplPtr managing the newly created TensorImpl.
*/
TensorImplPtr make_tensor_impl_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
void* data,
-std::vector<exec_aten::DimOrderType> dim_order,
-std::vector<exec_aten::StridesType> strides,
-exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
+std::vector<executorch::aten::DimOrderType> dim_order,
+std::vector<executorch::aten::StridesType> strides,
+executorch::aten::ScalarType type = executorch::aten::ScalarType::Float,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
std::function<void(void*)> deleter = nullptr);

/**
@@ -82,11 +82,11 @@ TensorImplPtr make_tensor_impl_ptr(
* @return A TensorImplPtr managing the newly created TensorImpl.
*/
inline TensorImplPtr make_tensor_impl_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
void* data,
-exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
+executorch::aten::ScalarType type = executorch::aten::ScalarType::Float,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
std::function<void(void*)> deleter = nullptr) {
return make_tensor_impl_ptr(
std::move(sizes), data, {}, {}, type, dynamism, std::move(deleter));
@@ -114,15 +114,16 @@ inline TensorImplPtr make_tensor_impl_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
TensorImplPtr make_tensor_impl_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::vector<T> data,
-std::vector<exec_aten::DimOrderType> dim_order = {},
-std::vector<exec_aten::StridesType> strides = {},
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::DimOrderType> dim_order = {},
+std::vector<executorch::aten::StridesType> strides = {},
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
if (type != deduced_type) {
ET_CHECK_MSG(
runtime::canCast(deduced_type, type),
@@ -179,13 +180,15 @@ TensorImplPtr make_tensor_impl_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorImplPtr make_tensor_impl_ptr(
std::vector<T> data,
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
-std::vector<exec_aten::SizesType> sizes{exec_aten::SizesType(data.size())};
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::SizesType> sizes{
+executorch::aten::SizesType(data.size())};
return make_tensor_impl_ptr(
std::move(sizes), std::move(data), {0}, {1}, type, dynamism);
}
@@ -213,15 +216,16 @@ inline TensorImplPtr make_tensor_impl_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorImplPtr make_tensor_impl_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::initializer_list<T> list,
-std::vector<exec_aten::DimOrderType> dim_order = {},
-std::vector<exec_aten::StridesType> strides = {},
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::DimOrderType> dim_order = {},
+std::vector<executorch::aten::StridesType> strides = {},
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
return make_tensor_impl_ptr(
std::move(sizes),
std::vector<T>(std::move(list)),
@@ -251,13 +255,15 @@ inline TensorImplPtr make_tensor_impl_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorImplPtr make_tensor_impl_ptr(
std::initializer_list<T> list,
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
-std::vector<exec_aten::SizesType> sizes{exec_aten::SizesType(list.size())};
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::SizesType> sizes{
+executorch::aten::SizesType(list.size())};
return make_tensor_impl_ptr(
std::move(sizes), std::move(list), {0}, {1}, type, dynamism);
}
@@ -291,13 +297,13 @@ inline TensorImplPtr make_tensor_impl_ptr(T value) {
* @return A TensorImplPtr managing the newly created TensorImpl.
*/
TensorImplPtr make_tensor_impl_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::vector<uint8_t> data,
-std::vector<exec_aten::DimOrderType> dim_order,
-std::vector<exec_aten::StridesType> strides,
-exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND);
+std::vector<executorch::aten::DimOrderType> dim_order,
+std::vector<executorch::aten::StridesType> strides,
+executorch::aten::ScalarType type = executorch::aten::ScalarType::Float,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND);

/**
* Creates a TensorImplPtr that manages a newly created TensorImpl with the
@@ -314,11 +320,11 @@ TensorImplPtr make_tensor_impl_ptr(
* @return A TensorImplPtr managing the newly created TensorImpl.
*/
inline TensorImplPtr make_tensor_impl_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::vector<uint8_t> data,
-exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+executorch::aten::ScalarType type = executorch::aten::ScalarType::Float,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
return make_tensor_impl_ptr(
std::move(sizes), std::move(data), {}, {}, type, dynamism);
}
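The doc comments in this file introduce TensorImplPtr as a managed alternative to a raw TensorImpl, so a short usage sketch of the migrated header may help. The executorch::extension namespace and the include path are inferred from the hunk headers and file path rather than shown verbatim in this diff; treat them as assumptions.

// Sketch only: assumes make_tensor_impl_ptr lives in executorch::extension
// and that the header is reachable at this path.
#include <executorch/extension/tensor/tensor_impl_ptr.h>

int main() {
  // Owning a copy of the data: the initializer_list overload above deduces
  // ScalarType::Float and a one-dimensional shape of {3}.
  auto owned = executorch::extension::make_tensor_impl_ptr({1.0f, 2.0f, 3.0f});

  // Wrapping caller-owned memory: the void* overload takes explicit sizes and
  // a dtype spelled with the new executorch::aten namespace.
  float buffer[6] = {0, 1, 2, 3, 4, 5};
  auto wrapped = executorch::extension::make_tensor_impl_ptr(
      {2, 3}, buffer, executorch::aten::ScalarType::Float);

  return owned && wrapped ? 0 : 1;
}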