// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// an external backend might generate file within its code tree
// and check all the source files within the tree with clang-format.
// so, disable it since the backend might have a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries, it
//       just excludes external projects such as torch_xla which
//       re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_XPU_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_copy_from.h>
#include <c10/macros/Macros.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_dimV_native.h>
#include <ATen/ops/_indices_native.h>
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
#include <ATen/ops/_values_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/dense_dim_native.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/indices_native.h>
#include <ATen/ops/is_coalesced_native.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/sparse_dim_native.h>
#include <ATen/ops/sparse_resize_and_clear_native.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/values_native.h>
#include <ATen/ops/zero_native.h>
#include <ATen/ops/zeros_native.h>

namespace at {
namespace {
// These helpers may go unused in any given generated registration file, so
// suppress -Wunused-function for the duration of this anonymous namespace.
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")

// Resize a user-supplied `out` tensor to `sizes` for an out= kernel, after
// validating that the tensor already matches the expected dtype and device.
// `strides` (and any memory format in `options`) are advisory and applied
// only when resize_output actually performed a resize.
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool resized = at::native::resize_output(out, sizes);
  // Only restride if a resize occurred; otherwise we ignore the (advisory)
  // strides from the meta function and directly use the output tensor's
  // preexisting strides
  if (resized) {
    if (!strides.empty()) {
      // Explicit strides and an explicit memory format are mutually
      // exclusive ways of fixing the layout.
      TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
      // TODO: avoid the redispatch here
      out.as_strided_(sizes, strides);
    } else if (options.memory_format_opt().has_value()) {
      out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
    }
  }
}

// Validate that an in-place call is well-formed: the computed result
// dtype/device/sizes must match what `self` already has.
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  // These checks are needed on those operators that:
  //   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
  //   2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
  // For other operators (e.g. 'add'), 'TensorIterator' already checks
  // these things separately.
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
C10_DIAGNOSTIC_POP()
} // namespace
} // namespace at

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for add.Tensor: no device check, no DeviceGuard; forwards
// straight to the native sparse kernel.
at::Tensor wrapper_SparseMeta_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return at::native::add_sparse(self, other, alpha);
}
} // anonymous namespace
namespace {
// SparseMeta shim for add.out: no device check, no DeviceGuard.
// NOTE(review): forwards to the CPU sparse out-kernel even though this is
// the SparseMeta key — presumably intended by the codegen mapping; confirm.
at::Tensor & wrapper_SparseMeta_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  return at::native::add_out_sparse_cpu(self, other, alpha, out);
}
} // anonymous namespace
namespace {
// SparseMeta shim for add_.Tensor: no device check, no DeviceGuard.
at::Tensor & wrapper_SparseMeta_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return at::native::add_sparse_(self, other, alpha);
}
} // anonymous namespace
// Register the shims above under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("add.Tensor", TORCH_FN(wrapper_SparseMeta_Tensor_add));
  m.impl("add.out", TORCH_FN(wrapper_SparseMeta_out_add_out));
  m.impl("add_.Tensor", TORCH_FN(wrapper_SparseMeta_Tensor_add_));
}
} // anonymous namespace
// Backend-namespace entry points that call the shims directly, bypassing the
// dispatcher entirely.
namespace sparsemeta {
at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_SparseMeta_Tensor_add(self, other, alpha);
}
at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_SparseMeta_out_add_out(self, other, alpha, out);
}
at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  return wrapper_SparseMeta_out_add_out(self, other, alpha, out);
}
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_SparseMeta_Tensor_add_(self, other, alpha);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for empty.memory_format: no device check, no DeviceGuard;
// forwards to the symint-aware native sparse constructor.
at::Tensor wrapper_SparseMeta_memory_format_empty(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::empty_sparse_symint(size, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("empty.memory_format", TORCH_FN(wrapper_SparseMeta_memory_format_empty));
}
} // anonymous namespace
// Backend-namespace entry points (dispatcher-bypassing). The TensorOptions
// overloads unpack options into the shim's scattered optional arguments; the
// IntArrayRef overloads convert eagerly to SymInt sizes.
namespace sparsemeta {
at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseMeta_memory_format_empty(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseMeta_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseMeta_memory_format_empty(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseMeta_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for empty_like: no device check, no DeviceGuard.
at::Tensor wrapper_SparseMeta__empty_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::empty_like_sparse_coo(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("empty_like", TORCH_FN(wrapper_SparseMeta__empty_like));
}
} // anonymous namespace
// Backend-namespace entry points (dispatcher-bypassing); the TensorOptions
// overload unpacks options into the shim's scattered optional arguments.
namespace sparsemeta {
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseMeta__empty_like(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseMeta__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for sum: no device check, no DeviceGuard; forwards to the
// native COO sum kernel.
at::Tensor wrapper_SparseMeta__sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
  return at::native::sum_coo(self, dtype);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("sum", TORCH_FN(wrapper_SparseMeta__sum));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
  return wrapper_SparseMeta__sum(self, dtype);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for zeros.out: no device check, no DeviceGuard. The native
// kernel is not symint-aware, so sizes are materialized via
// C10_AS_INTARRAYREF_SLOW before the call.
at::Tensor & wrapper_SparseMeta_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) {
  return at::native::zeros_sparse_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("zeros.out", TORCH_FN(wrapper_SparseMeta_out_zeros_out));
}
} // anonymous namespace
// Backend-namespace entry points (dispatcher-bypassing): out-first and
// out-last argument orders, in plain-int and symint flavors.
namespace sparsemeta {
at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
  return wrapper_SparseMeta_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
  return wrapper_SparseMeta_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
  return wrapper_SparseMeta_out_zeros_out(size, out);
}
at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
  return wrapper_SparseMeta_out_zeros_out(size, out);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for zero_: no device check, no DeviceGuard.
at::Tensor & wrapper_SparseMeta__zero_(at::Tensor & self) {
  return at::native::zero_sparse_(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("zero_", TORCH_FN(wrapper_SparseMeta__zero_));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor & zero_(at::Tensor & self) {
  return wrapper_SparseMeta__zero_(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _sparse_coo_tensor_with_dims: no device check, no
// DeviceGuard.
at::Tensor wrapper_SparseMeta___sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  return at::native::new_with_dims_sparse(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_sparse_coo_tensor_with_dims", TORCH_FN(wrapper_SparseMeta___sparse_coo_tensor_with_dims));
}
} // anonymous namespace
// Backend-namespace entry points (dispatcher-bypassing); the TensorOptions
// overload unpacks options into the shim's scattered optional arguments.
namespace sparsemeta {
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
  return wrapper_SparseMeta___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  return wrapper_SparseMeta___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _sparse_coo_tensor_with_dims_and_tensors: no device
// check, no DeviceGuard; forwards to the symint-aware native constructor.
at::Tensor wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  return at::native::new_with_dims_and_tensor_sparse_symint(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_sparse_coo_tensor_with_dims_and_tensors", TORCH_FN(wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors));
}
} // anonymous namespace
// Backend-namespace entry points (dispatcher-bypassing): plain-int and symint
// size flavors, each with a TensorOptions-unpacking overload.
namespace sparsemeta {
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced) {
  return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced) {
  return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
  return wrapper_SparseMeta___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for sparse_resize_: no device check, no DeviceGuard.
const at::Tensor & wrapper_SparseMeta__sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  return at::native::sparse_resize_(self, size, sparse_dim, dense_dim);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("sparse_resize_", TORCH_FN(wrapper_SparseMeta__sparse_resize_));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  return wrapper_SparseMeta__sparse_resize_(self, size, sparse_dim, dense_dim);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for sparse_resize_and_clear_: no device check, no
// DeviceGuard.
const at::Tensor & wrapper_SparseMeta__sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  return at::native::sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("sparse_resize_and_clear_", TORCH_FN(wrapper_SparseMeta__sparse_resize_and_clear_));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  return wrapper_SparseMeta__sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for sparse_dim: no device check, no DeviceGuard.
int64_t wrapper_SparseMeta__sparse_dim(const at::Tensor & self) {
  return at::native::sparse_dim_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("sparse_dim", TORCH_FN(wrapper_SparseMeta__sparse_dim));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
int64_t sparse_dim(const at::Tensor & self) {
  return wrapper_SparseMeta__sparse_dim(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for dense_dim: no device check, no DeviceGuard.
int64_t wrapper_SparseMeta__dense_dim(const at::Tensor & self) {
  return at::native::dense_dim_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("dense_dim", TORCH_FN(wrapper_SparseMeta__dense_dim));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
int64_t dense_dim(const at::Tensor & self) {
  return wrapper_SparseMeta__dense_dim(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _dimV: no device check, no DeviceGuard. Like
// dense_dim above, this maps onto at::native::dense_dim_sparse.
int64_t wrapper_SparseMeta___dimV(const at::Tensor & self) {
  return at::native::dense_dim_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_dimV", TORCH_FN(wrapper_SparseMeta___dimV));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
int64_t _dimV(const at::Tensor & self) {
  return wrapper_SparseMeta___dimV(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _nnz: no device check, no DeviceGuard.
int64_t wrapper_SparseMeta___nnz(const at::Tensor & self) {
  return at::native::_nnz_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_nnz", TORCH_FN(wrapper_SparseMeta___nnz));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
int64_t _nnz(const at::Tensor & self) {
  return wrapper_SparseMeta___nnz(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for is_coalesced: no device check, no DeviceGuard.
bool wrapper_SparseMeta__is_coalesced(const at::Tensor & self) {
  return at::native::is_coalesced_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("is_coalesced", TORCH_FN(wrapper_SparseMeta__is_coalesced));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
bool is_coalesced(const at::Tensor & self) {
  return wrapper_SparseMeta__is_coalesced(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _indices: no device check, no DeviceGuard.
at::Tensor wrapper_SparseMeta___indices(const at::Tensor & self) {
  return at::native::_indices_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_indices", TORCH_FN(wrapper_SparseMeta___indices));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor _indices(const at::Tensor & self) {
  return wrapper_SparseMeta___indices(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _values: no device check, no DeviceGuard.
at::Tensor wrapper_SparseMeta___values(const at::Tensor & self) {
  return at::native::_values_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_values", TORCH_FN(wrapper_SparseMeta___values));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor _values(const at::Tensor & self) {
  return wrapper_SparseMeta___values(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for _coalesced_: no device check, no DeviceGuard.
at::Tensor & wrapper_SparseMeta___coalesced_(at::Tensor & self, bool coalesced) {
  return at::native::_coalesced_sparse_(self, coalesced);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("_coalesced_", TORCH_FN(wrapper_SparseMeta___coalesced_));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) {
  return wrapper_SparseMeta___coalesced_(self, coalesced);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for indices: no device check, no DeviceGuard.
at::Tensor wrapper_SparseMeta__indices(const at::Tensor & self) {
  return at::native::indices_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("indices", TORCH_FN(wrapper_SparseMeta__indices));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor indices(const at::Tensor & self) {
  return wrapper_SparseMeta__indices(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for values: no device check, no DeviceGuard.
at::Tensor wrapper_SparseMeta__values(const at::Tensor & self) {
  return at::native::values_sparse(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("values", TORCH_FN(wrapper_SparseMeta__values));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor values(const at::Tensor & self) {
  return wrapper_SparseMeta__values(self);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for copy_sparse_to_sparse_: no device check, no
// DeviceGuard.
at::Tensor & wrapper_SparseMeta__copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  return at::native::copy_sparse_(self, src, non_blocking);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("copy_sparse_to_sparse_", TORCH_FN(wrapper_SparseMeta__copy_sparse_to_sparse_));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  return wrapper_SparseMeta__copy_sparse_to_sparse_(self, src, non_blocking);
}
} // namespace sparsemeta
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseMeta shim for isinf: no device check, no DeviceGuard.
at::Tensor wrapper_SparseMeta__isinf(const at::Tensor & self) {
  return at::native::isinf_sparse_meta(self);
}
} // anonymous namespace
// Register the shim under the SparseMeta dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseMeta, m) {
  m.impl("isinf", TORCH_FN(wrapper_SparseMeta__isinf));
}
} // anonymous namespace
// Backend-namespace entry point (dispatcher-bypassing).
namespace sparsemeta {
at::Tensor isinf(const at::Tensor & self) {
  return wrapper_SparseMeta__isinf(self);
}
} // namespace sparsemeta
} // namespace at
