// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// an external backend might generate file within its code tree
// and check all the source files within the tree with clang-format.
// so, disable it since the backend might have a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries, it
//       just excludes external projects such as torch_xla which
//       re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_XPU_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_copy_from.h>
#include <c10/macros/Macros.h>
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_dimI_native.h>
#include <ATen/ops/_dimV_native.h>
#include <ATen/ops/_indices_native.h>
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/_sparse_broadcast_to_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_mask_projection_native.h>
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_sum_backward_native.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_values_native.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/dense_dim_native.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/indices_native.h>
#include <ATen/ops/is_coalesced_native.h>
#include <ATen/ops/is_pinned_native.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mv_native.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/permute_native.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sparse_dim_native.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_resize_and_clear_native.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/unsqueeze_native.h>
#include <ATen/ops/values_native.h>
#include <ATen/ops/zero_native.h>
#include <ATen/ops/zeros_native.h>

namespace at {
namespace {
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")

// Resize (and possibly restride) `out` to `sizes`, after validating that it
// matches the dtype/device the meta function computed in `options`.
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool did_resize = at::native::resize_output(out, sizes);
  if (!did_resize) {
    // No resize happened: keep the output tensor's preexisting strides and
    // ignore the (advisory) strides coming from the meta function.
    return;
  }
  if (!strides.empty()) {
    // An explicit stride layout and a memory format are mutually exclusive.
    TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
    // TODO: avoid the redispatch here
    out.as_strided_(sizes, strides);
  } else if (options.memory_format_opt().has_value()) {
    out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
  }
}

// Validate that an in-place call's `self` agrees with the dtype/device/sizes
// the meta function computed. Needed for operators that either
//   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm'), or
//   2) have particular typing rules (e.g. 'cumsum' and 'cumprod');
// other operators (e.g. 'add') get these checks from 'TensorIterator' itself.
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  const auto self_dtype = self.dtype();
  const auto self_device = self.device();
  TORCH_CHECK(options.dtype() == self_dtype,
      "Bad in-place call: ",
      "input tensor dtype ", self_dtype, " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self_device,
      "Bad in-place call: ",
      "input tensor device ", self_device, " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
C10_DIAGNOSTIC_POP()
} // namespace
} // namespace at

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `abs`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__abs(const at::Tensor & self) {
  return at::native::abs_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_abs_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::abs_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__abs_(at::Tensor & self) {
  return at::native::abs_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("abs", TORCH_FN(wrapper_SparseCPU__abs));
  m.impl("abs.out", TORCH_FN(wrapper_SparseCPU_out_abs_out));
  m.impl("abs_", TORCH_FN(wrapper_SparseCPU__abs_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor abs(const at::Tensor & self) {
  return wrapper_SparseCPU__abs(self);
}
at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_abs_out(self, out);
}
at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_abs_out(self, out);
}
at::Tensor & abs_(at::Tensor & self) {
  return wrapper_SparseCPU__abs_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `sgn`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__sgn(const at::Tensor & self) {
  return at::native::sgn_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_sgn_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::sgn_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__sgn_(at::Tensor & self) {
  return at::native::sgn_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("sgn", TORCH_FN(wrapper_SparseCPU__sgn));
  m.impl("sgn.out", TORCH_FN(wrapper_SparseCPU_out_sgn_out));
  m.impl("sgn_", TORCH_FN(wrapper_SparseCPU__sgn_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor sgn(const at::Tensor & self) {
  return wrapper_SparseCPU__sgn(self);
}
at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_sgn_out(self, out);
}
at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_sgn_out(self, out);
}
at::Tensor & sgn_(at::Tensor & self) {
  return wrapper_SparseCPU__sgn_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrapper for `conj_physical.out`. Per the codegen'd
// schema, device checks and DeviceGuard are intentionally omitted.
at::Tensor & wrapper_SparseCPU_out_conj_physical_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::conj_physical_out_sparse(self, out);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("conj_physical.out", TORCH_FN(wrapper_SparseCPU_out_conj_physical_out));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_conj_physical_out(self, out);
}
at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_conj_physical_out(self, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `add.Tensor`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU_Tensor_add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return at::native::add_sparse(self, other, alpha);
}

at::Tensor & wrapper_SparseCPU_out_add_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  return at::native::add_out_sparse_cpu(self, other, alpha, out);
}

at::Tensor & wrapper_SparseCPU_Tensor_add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return at::native::add_sparse_(self, other, alpha);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("add.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_add));
  m.impl("add.out", TORCH_FN(wrapper_SparseCPU_out_add_out));
  m.impl("add_.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_add_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_SparseCPU_Tensor_add(self, other, alpha);
}
at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_SparseCPU_out_add_out(self, other, alpha, out);
}
at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
  return wrapper_SparseCPU_out_add_out(self, other, alpha, out);
}
at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  return wrapper_SparseCPU_Tensor_add_(self, other, alpha);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `asinh`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__asinh(const at::Tensor & self) {
  return at::native::asinh_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_asinh_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::asinh_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__asinh_(at::Tensor & self) {
  return at::native::asinh_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("asinh", TORCH_FN(wrapper_SparseCPU__asinh));
  m.impl("asinh.out", TORCH_FN(wrapper_SparseCPU_out_asinh_out));
  m.impl("asinh_", TORCH_FN(wrapper_SparseCPU__asinh_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor asinh(const at::Tensor & self) {
  return wrapper_SparseCPU__asinh(self);
}
at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_asinh_out(self, out);
}
at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_asinh_out(self, out);
}
at::Tensor & asinh_(at::Tensor & self) {
  return wrapper_SparseCPU__asinh_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `atanh`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__atanh(const at::Tensor & self) {
  return at::native::atanh_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_atanh_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::atanh_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__atanh_(at::Tensor & self) {
  return at::native::atanh_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("atanh", TORCH_FN(wrapper_SparseCPU__atanh));
  m.impl("atanh.out", TORCH_FN(wrapper_SparseCPU_out_atanh_out));
  m.impl("atanh_", TORCH_FN(wrapper_SparseCPU__atanh_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor atanh(const at::Tensor & self) {
  return wrapper_SparseCPU__atanh(self);
}
at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_atanh_out(self, out);
}
at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_atanh_out(self, out);
}
at::Tensor & atanh_(at::Tensor & self) {
  return wrapper_SparseCPU__atanh_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `asin`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__asin(const at::Tensor & self) {
  return at::native::asin_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_asin_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::asin_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__asin_(at::Tensor & self) {
  return at::native::asin_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("asin", TORCH_FN(wrapper_SparseCPU__asin));
  m.impl("asin.out", TORCH_FN(wrapper_SparseCPU_out_asin_out));
  m.impl("asin_", TORCH_FN(wrapper_SparseCPU__asin_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor asin(const at::Tensor & self) {
  return wrapper_SparseCPU__asin(self);
}
at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_asin_out(self, out);
}
at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_asin_out(self, out);
}
at::Tensor & asin_(at::Tensor & self) {
  return wrapper_SparseCPU__asin_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `atan`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__atan(const at::Tensor & self) {
  return at::native::atan_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_atan_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::atan_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__atan_(at::Tensor & self) {
  return at::native::atan_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("atan", TORCH_FN(wrapper_SparseCPU__atan));
  m.impl("atan.out", TORCH_FN(wrapper_SparseCPU_out_atan_out));
  m.impl("atan_", TORCH_FN(wrapper_SparseCPU__atan_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor atan(const at::Tensor & self) {
  return wrapper_SparseCPU__atan(self);
}
at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_atan_out(self, out);
}
at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_atan_out(self, out);
}
at::Tensor & atan_(at::Tensor & self) {
  return wrapper_SparseCPU__atan_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `bmm`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__bmm(const at::Tensor & self, const at::Tensor & mat2) {
  return at::native::bmm_sparse_cpu(self, mat2);
}

at::Tensor & wrapper_SparseCPU_out_bmm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  return at::native::bmm_out_sparse_cpu(self, mat2, out);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("bmm", TORCH_FN(wrapper_SparseCPU__bmm));
  m.impl("bmm.out", TORCH_FN(wrapper_SparseCPU_out_bmm_out));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
  return wrapper_SparseCPU__bmm(self, mat2);
}
at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
  return wrapper_SparseCPU_out_bmm_out(self, mat2, out);
}
at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  return wrapper_SparseCPU_out_bmm_out(self, mat2, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrapper for `_sparse_broadcast_to`. Per the codegen'd
// schema, device checks and DeviceGuard are intentionally omitted.
at::Tensor wrapper_SparseCPU___sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
  return at::native::sparse_broadcast_to(self, size);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("_sparse_broadcast_to", TORCH_FN(wrapper_SparseCPU___sparse_broadcast_to));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor _sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
  return wrapper_SparseCPU___sparse_broadcast_to(self, size);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrapper for `cat`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__cat(const at::ITensorListRef & tensors, int64_t dim) {
  return at::native::cat_sparse(tensors, dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("cat", TORCH_FN(wrapper_SparseCPU__cat));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim) {
  return wrapper_SparseCPU__cat(tensors, dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `ceil`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__ceil(const at::Tensor & self) {
  return at::native::ceil_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_ceil_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::ceil_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__ceil_(at::Tensor & self) {
  return at::native::ceil_sparse_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("ceil", TORCH_FN(wrapper_SparseCPU__ceil));
  m.impl("ceil.out", TORCH_FN(wrapper_SparseCPU_out_ceil_out));
  m.impl("ceil_", TORCH_FN(wrapper_SparseCPU__ceil_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor ceil(const at::Tensor & self) {
  return wrapper_SparseCPU__ceil(self);
}
at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_ceil_out(self, out);
}
at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_ceil_out(self, out);
}
at::Tensor & ceil_(at::Tensor & self) {
  return wrapper_SparseCPU__ceil_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrapper for `copy_`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor & wrapper_SparseCPU__copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  return at::native::copy_sparse_wrapper_(self, src, non_blocking);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("copy_", TORCH_FN(wrapper_SparseCPU__copy_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  return wrapper_SparseCPU__copy_(self, src, non_blocking);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `div.Tensor`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU_Tensor_div(const at::Tensor & self, const at::Tensor & other) {
  return at::native::div_sparse(self, other);
}

at::Tensor & wrapper_SparseCPU_out_div_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return at::native::div_out_sparse_zerodim(self, other, out);
}

at::Tensor & wrapper_SparseCPU_Tensor_div_(at::Tensor & self, const at::Tensor & other) {
  return at::native::div_sparse_(self, other);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("div.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_div));
  m.impl("div.out", TORCH_FN(wrapper_SparseCPU_out_div_out));
  m.impl("div_.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_div_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_Tensor_div(self, other);
}
at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_out_div_out(self, other, out);
}
at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return wrapper_SparseCPU_out_div_out(self, other, out);
}
at::Tensor & div_(at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_Tensor_div_(self, other);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrappers for `div.Tensor_mode` (division with an
// optional rounding mode). Per the codegen'd schema, device checks and
// DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU_Tensor_mode_div(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
  return at::native::div_sparse(self, other, rounding_mode);
}

at::Tensor & wrapper_SparseCPU_out_mode_div_out(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
  return at::native::div_out_sparse_zerodim(self, other, rounding_mode, out);
}

at::Tensor & wrapper_SparseCPU_Tensor_mode_div_(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
  return at::native::div_sparse_(self, other, rounding_mode);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("div.Tensor_mode", TORCH_FN(wrapper_SparseCPU_Tensor_mode_div));
  m.impl("div.out_mode", TORCH_FN(wrapper_SparseCPU_out_mode_div_out));
  m.impl("div_.Tensor_mode", TORCH_FN(wrapper_SparseCPU_Tensor_mode_div_));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API.
namespace sparsecpu {
at::Tensor div(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
  return wrapper_SparseCPU_Tensor_mode_div(self, other, rounding_mode);
}
at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
  return wrapper_SparseCPU_out_mode_div_out(self, other, rounding_mode, out);
}
at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
  return wrapper_SparseCPU_out_mode_div_out(self, other, rounding_mode, out);
}
at::Tensor & div_(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
  return wrapper_SparseCPU_Tensor_mode_div_(self, other, rounding_mode);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrapper for `empty.memory_format`. The SymInt sizes are
// lowered to a plain IntArrayRef before reaching the native sparse kernel.
// Per the codegen'd schema, device checks and DeviceGuard are omitted.
at::Tensor wrapper_SparseCPU_memory_format_empty(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::empty_sparse(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("empty.memory_format", TORCH_FN(wrapper_SparseCPU_memory_format_empty));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API:
// TensorOptions and unpacked-options overloads, in int and SymInt flavors.
namespace sparsecpu {
at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCPU_memory_format_empty(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU dispatch wrapper for `empty_like`. Per the codegen'd schema,
// device checks and DeviceGuard are intentionally omitted for this key.
at::Tensor wrapper_SparseCPU__empty_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::empty_like_sparse_coo(self, dtype, layout, device, pin_memory, memory_format);
}

TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("empty_like", TORCH_FN(wrapper_SparseCPU__empty_like));
}

} // anonymous namespace

// Direct (dispatcher-bypassing) entry points mirroring the at:: API:
// TensorOptions and unpacked-options overloads.
namespace sparsecpu {
at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCPU__empty_like(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__erf(const at::Tensor & self) {
  return at::native::erf_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_erf_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::erf_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__erf_(at::Tensor & self) {
  return at::native::erf_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("erf", TORCH_FN(wrapper_SparseCPU__erf));
  m.impl("erf.out", TORCH_FN(wrapper_SparseCPU_out_erf_out));
  m.impl("erf_", TORCH_FN(wrapper_SparseCPU__erf_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor erf(const at::Tensor & self) {
  return wrapper_SparseCPU__erf(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_erf_out(self, out);
}

at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_erf_out(self, out);
}

at::Tensor & erf_(at::Tensor & self) {
  return wrapper_SparseCPU__erf_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__expm1(const at::Tensor & self) {
  return at::native::expm1_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_expm1_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::expm1_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__expm1_(at::Tensor & self) {
  return at::native::expm1_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("expm1", TORCH_FN(wrapper_SparseCPU__expm1));
  m.impl("expm1.out", TORCH_FN(wrapper_SparseCPU_out_expm1_out));
  m.impl("expm1_", TORCH_FN(wrapper_SparseCPU__expm1_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor expm1(const at::Tensor & self) {
  return wrapper_SparseCPU__expm1(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_expm1_out(self, out);
}

at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_expm1_out(self, out);
}

at::Tensor & expm1_(at::Tensor & self) {
  return wrapper_SparseCPU__expm1_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__floor(const at::Tensor & self) {
  return at::native::floor_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_floor_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::floor_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__floor_(at::Tensor & self) {
  return at::native::floor_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("floor", TORCH_FN(wrapper_SparseCPU__floor));
  m.impl("floor.out", TORCH_FN(wrapper_SparseCPU_out_floor_out));
  m.impl("floor_", TORCH_FN(wrapper_SparseCPU__floor_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor floor(const at::Tensor & self) {
  return wrapper_SparseCPU__floor(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_floor_out(self, out);
}

at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_floor_out(self, out);
}

at::Tensor & floor_(at::Tensor & self) {
  return wrapper_SparseCPU__floor_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__floor_divide(const at::Tensor & self, const at::Tensor & other) {
  return at::native::floor_divide_sparse(self, other);
}

at::Tensor & wrapper_SparseCPU_out_floor_divide_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return at::native::floor_divide_out_sparse_zerodim(self, other, out);
}

at::Tensor & wrapper_SparseCPU_Tensor_floor_divide_(at::Tensor & self, const at::Tensor & other) {
  return at::native::floor_divide_sparse_(self, other);
}

// Register functional, out=, and in-place (Tensor overload) variants.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("floor_divide", TORCH_FN(wrapper_SparseCPU__floor_divide));
  m.impl("floor_divide.out", TORCH_FN(wrapper_SparseCPU_out_floor_divide_out));
  m.impl("floor_divide_.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_floor_divide_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU__floor_divide(self, other);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_out_floor_divide_out(self, other, out);
}

at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return wrapper_SparseCPU_out_floor_divide_out(self, other, out);
}

at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_Tensor_floor_divide_(self, other);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__frac(const at::Tensor & self) {
  return at::native::frac_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_frac_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::frac_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__frac_(at::Tensor & self) {
  return at::native::frac_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("frac", TORCH_FN(wrapper_SparseCPU__frac));
  m.impl("frac.out", TORCH_FN(wrapper_SparseCPU_out_frac_out));
  m.impl("frac_", TORCH_FN(wrapper_SparseCPU__frac_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor frac(const at::Tensor & self) {
  return wrapper_SparseCPU__frac(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_frac_out(self, out);
}

at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_frac_out(self, out);
}

at::Tensor & frac_(at::Tensor & self) {
  return wrapper_SparseCPU__frac_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: forwards unchanged to the sparse kernel (no device
// check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__isnan(const at::Tensor & self) {
  return at::native::isnan_sparse(self);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("isnan", TORCH_FN(wrapper_SparseCPU__isnan));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry point for this backend.
namespace sparsecpu {

at::Tensor isnan(const at::Tensor & self) {
  return wrapper_SparseCPU__isnan(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__nan_to_num(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  return at::native::nan_to_num_sparse(self, nan, posinf, neginf);
}

at::Tensor & wrapper_SparseCPU_out_nan_to_num_out(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
  return at::native::nan_to_num_sparse_out(self, nan, posinf, neginf, out);
}

at::Tensor & wrapper_SparseCPU__nan_to_num_(at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  return at::native::nan_to_num_sparse_(self, nan, posinf, neginf);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("nan_to_num", TORCH_FN(wrapper_SparseCPU__nan_to_num));
  m.impl("nan_to_num.out", TORCH_FN(wrapper_SparseCPU_out_nan_to_num_out));
  m.impl("nan_to_num_", TORCH_FN(wrapper_SparseCPU__nan_to_num_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor nan_to_num(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  return wrapper_SparseCPU__nan_to_num(self, nan, posinf, neginf);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  return wrapper_SparseCPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
}

at::Tensor & nan_to_num_outf(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
  return wrapper_SparseCPU_out_nan_to_num_out(self, nan, posinf, neginf, out);
}

at::Tensor & nan_to_num_(at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  return wrapper_SparseCPU__nan_to_num_(self, nan, posinf, neginf);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__log1p(const at::Tensor & self) {
  return at::native::log1p_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::log1p_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__log1p_(at::Tensor & self) {
  return at::native::log1p_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("log1p", TORCH_FN(wrapper_SparseCPU__log1p));
  m.impl("log1p.out", TORCH_FN(wrapper_SparseCPU_out_log1p_out));
  m.impl("log1p_", TORCH_FN(wrapper_SparseCPU__log1p_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor log1p(const at::Tensor & self) {
  return wrapper_SparseCPU__log1p(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_log1p_out(self, out);
}

at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_log1p_out(self, out);
}

at::Tensor & log1p_(at::Tensor & self) {
  return wrapper_SparseCPU__log1p_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__mm(const at::Tensor & self, const at::Tensor & mat2) {
  return at::native::_sparse_mm(self, mat2);
}

at::Tensor & wrapper_SparseCPU_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  return at::native::_sparse_mm_out(self, mat2, out);
}

// Register functional and out= variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("mm", TORCH_FN(wrapper_SparseCPU__mm));
  m.impl("mm.out", TORCH_FN(wrapper_SparseCPU_out_mm_out));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
  return wrapper_SparseCPU__mm(self, mat2);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
  return wrapper_SparseCPU_out_mm_out(self, mat2, out);
}

at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  return wrapper_SparseCPU_out_mm_out(self, mat2, out);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: forwards unchanged to the CPU sparse-sparse matmul
// kernel (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU___sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
  return at::native::sparse_sparse_matmul_cpu(self, other);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("_sparse_sparse_matmul", TORCH_FN(wrapper_SparseCPU___sparse_sparse_matmul));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry point for this backend.
namespace sparsecpu {

at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU___sparse_sparse_matmul(self, other);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
  return at::native::mul_sparse(self, other);
}

at::Tensor & wrapper_SparseCPU_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return at::native::mul_out_sparse_cpu(self, other, out);
}

at::Tensor & wrapper_SparseCPU_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
  return at::native::mul_sparse_(self, other);
}

// Register the Tensor-overload functional, out=, and in-place variants.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("mul.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_mul));
  m.impl("mul.out", TORCH_FN(wrapper_SparseCPU_out_mul_out));
  m.impl("mul_.Tensor", TORCH_FN(wrapper_SparseCPU_Tensor_mul_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_Tensor_mul(self, other);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_out_mul_out(self, other, out);
}

at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return wrapper_SparseCPU_out_mul_out(self, other, out);
}

at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
  return wrapper_SparseCPU_Tensor_mul_(self, other);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: forwards unchanged to the sparse matrix-vector kernel
// (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__mv(const at::Tensor & self, const at::Tensor & vec) {
  return at::native::mv_sparse(self, vec);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("mv", TORCH_FN(wrapper_SparseCPU__mv));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry point for this backend.
namespace sparsecpu {

at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) {
  return wrapper_SparseCPU__mv(self, vec);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: the native sparse kernel only accepts concrete ints, so
// guard_int materializes the SymInt arguments (recording a guard under
// symbolic shapes) before forwarding.
at::Tensor wrapper_SparseCPU__narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  const auto concrete_start = start.guard_int(__FILE__, __LINE__);
  const auto concrete_length = length.guard_int(__FILE__, __LINE__);
  return at::native::narrow_copy_sparse(self, dim, concrete_start, concrete_length);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("narrow_copy", TORCH_FN(wrapper_SparseCPU__narrow_copy));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

// int64_t overload: ints convert implicitly to SymInt at the call.
at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
  return wrapper_SparseCPU__narrow_copy(self, dim, start, length);
}

at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  return wrapper_SparseCPU__narrow_copy(self, dim, start, length);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: forwards unchanged to the sparse COO permute kernel
// (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__permute(const at::Tensor & self, at::IntArrayRef dims) {
  return at::native::permute_sparse_coo(self, dims);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("permute", TORCH_FN(wrapper_SparseCPU__permute));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry point for this backend.
namespace sparsecpu {

at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) {
  return wrapper_SparseCPU__permute(self, dims);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: forwards unchanged to the sparse COO pin-check kernel
// (no device check, no DeviceGuard for this dispatch key).
bool wrapper_SparseCPU__is_pinned(const at::Tensor & self, ::std::optional<at::Device> device) {
  return at::native::is_pinned_sparse_coo(self, device);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("is_pinned", TORCH_FN(wrapper_SparseCPU__is_pinned));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry point for this backend.
namespace sparsecpu {

bool is_pinned(const at::Tensor & self, ::std::optional<at::Device> device) {
  return wrapper_SparseCPU__is_pinned(self, device);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim: forwards unchanged to the sparse COO pinning kernel
// (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU___pin_memory(const at::Tensor & self, ::std::optional<at::Device> device) {
  return at::native::_pin_memory_sparse_coo(self, device);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("_pin_memory", TORCH_FN(wrapper_SparseCPU___pin_memory));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry point for this backend.
namespace sparsecpu {

at::Tensor _pin_memory(const at::Tensor & self, ::std::optional<at::Device> device) {
  return wrapper_SparseCPU___pin_memory(self, device);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__rad2deg(const at::Tensor & self) {
  return at::native::rad2deg_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::rad2deg_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__rad2deg_(at::Tensor & self) {
  return at::native::rad2deg_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("rad2deg", TORCH_FN(wrapper_SparseCPU__rad2deg));
  m.impl("rad2deg.out", TORCH_FN(wrapper_SparseCPU_out_rad2deg_out));
  m.impl("rad2deg_", TORCH_FN(wrapper_SparseCPU__rad2deg_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor rad2deg(const at::Tensor & self) {
  return wrapper_SparseCPU__rad2deg(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_rad2deg_out(self, out);
}

at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_rad2deg_out(self, out);
}

at::Tensor & rad2deg_(at::Tensor & self) {
  return wrapper_SparseCPU__rad2deg_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__deg2rad(const at::Tensor & self) {
  return at::native::deg2rad_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::deg2rad_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__deg2rad_(at::Tensor & self) {
  return at::native::deg2rad_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("deg2rad", TORCH_FN(wrapper_SparseCPU__deg2rad));
  m.impl("deg2rad.out", TORCH_FN(wrapper_SparseCPU_out_deg2rad_out));
  m.impl("deg2rad_", TORCH_FN(wrapper_SparseCPU__deg2rad_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor deg2rad(const at::Tensor & self) {
  return wrapper_SparseCPU__deg2rad(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_deg2rad_out(self, out);
}

at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_deg2rad_out(self, out);
}

at::Tensor & deg2rad_(at::Tensor & self) {
  return wrapper_SparseCPU__deg2rad_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__neg(const at::Tensor & self) {
  return at::native::neg_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_neg_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::neg_out_sparse(self, out);
}

at::Tensor & wrapper_SparseCPU__neg_(at::Tensor & self) {
  return at::native::neg_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("neg", TORCH_FN(wrapper_SparseCPU__neg));
  m.impl("neg.out", TORCH_FN(wrapper_SparseCPU_out_neg_out));
  m.impl("neg_", TORCH_FN(wrapper_SparseCPU__neg_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor neg(const at::Tensor & self) {
  return wrapper_SparseCPU__neg(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_neg_out(self, out);
}

at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_neg_out(self, out);
}

at::Tensor & neg_(at::Tensor & self) {
  return wrapper_SparseCPU__neg_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__round(const at::Tensor & self) {
  return at::native::round_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_round_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::round_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__round_(at::Tensor & self) {
  return at::native::round_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("round", TORCH_FN(wrapper_SparseCPU__round));
  m.impl("round.out", TORCH_FN(wrapper_SparseCPU_out_round_out));
  m.impl("round_", TORCH_FN(wrapper_SparseCPU__round_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor round(const at::Tensor & self) {
  return wrapper_SparseCPU__round(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_round_out(self, out);
}

at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_round_out(self, out);
}

at::Tensor & round_(at::Tensor & self) {
  return wrapper_SparseCPU__round_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__relu(const at::Tensor & self) {
  return at::native::relu_sparse(self);
}

at::Tensor & wrapper_SparseCPU__relu_(at::Tensor & self) {
  return at::native::relu_sparse_(self);
}

// Register functional and in-place variants under SparseCPU (no out=).
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("relu", TORCH_FN(wrapper_SparseCPU__relu));
  m.impl("relu_", TORCH_FN(wrapper_SparseCPU__relu_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor relu(const at::Tensor & self) {
  return wrapper_SparseCPU__relu(self);
}

at::Tensor & relu_(at::Tensor & self) {
  return wrapper_SparseCPU__relu_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__sin(const at::Tensor & self) {
  return at::native::sin_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_sin_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::sin_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__sin_(at::Tensor & self) {
  return at::native::sin_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("sin", TORCH_FN(wrapper_SparseCPU__sin));
  m.impl("sin.out", TORCH_FN(wrapper_SparseCPU_out_sin_out));
  m.impl("sin_", TORCH_FN(wrapper_SparseCPU__sin_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor sin(const at::Tensor & self) {
  return wrapper_SparseCPU__sin(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_sin_out(self, out);
}

at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_sin_out(self, out);
}

at::Tensor & sin_(at::Tensor & self) {
  return wrapper_SparseCPU__sin_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shims: each forwards unchanged to the sparse kernel in
// at::native (no device check, no DeviceGuard for this dispatch key).
at::Tensor wrapper_SparseCPU__sinh(const at::Tensor & self) {
  return at::native::sinh_sparse(self);
}

at::Tensor & wrapper_SparseCPU_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::sinh_sparse_out(self, out);
}

at::Tensor & wrapper_SparseCPU__sinh_(at::Tensor & self) {
  return at::native::sinh_sparse_(self);
}

// Register functional, out=, and in-place variants under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("sinh", TORCH_FN(wrapper_SparseCPU__sinh));
  m.impl("sinh.out", TORCH_FN(wrapper_SparseCPU_out_sinh_out));
  m.impl("sinh_", TORCH_FN(wrapper_SparseCPU__sinh_));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

at::Tensor sinh(const at::Tensor & self) {
  return wrapper_SparseCPU__sinh(self);
}

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCPU_out_sinh_out(self, out);
}

at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCPU_out_sinh_out(self, out);
}

at::Tensor & sinh_(at::Tensor & self) {
  return wrapper_SparseCPU__sinh_(self);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCPU shim (out= only): forwards unchanged to the CPU sspaddmm
// kernel (no device check, no DeviceGuard for this dispatch key).
at::Tensor & wrapper_SparseCPU_out_sspaddmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  return at::native::_sspaddmm_out_cpu(self, mat1, mat2, beta, alpha, out);
}

// Register the shim under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
  m.impl("sspaddmm.out", TORCH_FN(wrapper_SparseCPU_out_sspaddmm_out));
}

} // anonymous namespace

// Direct (non-dispatching) C++ entry points for this backend.
namespace sparsecpu {

// _out spelling takes out first; _outf takes it last (schema order).
at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  return wrapper_SparseCPU_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
}

at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
  return wrapper_SparseCPU_out_sspaddmm_out(self, mat1, mat2, beta, alpha, out);
}

} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::sum (SparseCPU): full reduction over a sparse COO tensor, optionally
// casting to `dtype`. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU__sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::sum_coo(self, dtype);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sum",
TORCH_FN(wrapper_SparseCPU__sum));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::sum.
namespace sparsecpu {
at::Tensor sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
return wrapper_SparseCPU__sum(self, dtype);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::sum.dim_IntList (SparseCPU): reduction over the given (optional) dims
// of a sparse COO tensor. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU_dim_IntList_sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::sum_sparse_coo(self, dim, keepdim, dtype);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sum.dim_IntList",
TORCH_FN(wrapper_SparseCPU_dim_IntList_sum));
}
} // anonymous namespace
// Non-dispatched entry point; overloads at::sparsecpu::sum above by arity.
namespace sparsecpu {
at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_SparseCPU_dim_IntList_sum(self, dim, keepdim, dtype);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::sqrt (SparseCPU): forwards to the native sparse kernel.
// No device check / DeviceGuard emitted by torchgen for this section.
at::Tensor wrapper_SparseCPU__sqrt(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse(self);
}
} // anonymous namespace
namespace {
// aten::sqrt.out (SparseCPU): writes into `out` and returns it.
at::Tensor & wrapper_SparseCPU_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_out(self, out);
}
} // anonymous namespace
namespace {
// aten::sqrt_ (SparseCPU): in-place variant; mutates and returns `self`.
at::Tensor & wrapper_SparseCPU__sqrt_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_(self);
}
} // anonymous namespace
// Dispatcher registration of the three sqrt overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sqrt",
TORCH_FN(wrapper_SparseCPU__sqrt));
m.impl("sqrt.out",
TORCH_FN(wrapper_SparseCPU_out_sqrt_out));
m.impl("sqrt_",
TORCH_FN(wrapper_SparseCPU__sqrt_));
}
} // anonymous namespace
// Non-dispatched entry points: *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_SparseCPU__sqrt(self);
}
at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_SparseCPU__sqrt_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::tan (SparseCPU): forwards to the native sparse kernel.
// No device check / DeviceGuard emitted by torchgen for this section.
at::Tensor wrapper_SparseCPU__tan(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse(self);
}
} // anonymous namespace
namespace {
// aten::tan.out (SparseCPU): writes into `out` and returns it.
at::Tensor & wrapper_SparseCPU_out_tan_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_out(self, out);
}
} // anonymous namespace
namespace {
// aten::tan_ (SparseCPU): in-place variant; mutates and returns `self`.
at::Tensor & wrapper_SparseCPU__tan_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_(self);
}
} // anonymous namespace
// Dispatcher registration of the three tan overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("tan",
TORCH_FN(wrapper_SparseCPU__tan));
m.impl("tan.out",
TORCH_FN(wrapper_SparseCPU_out_tan_out));
m.impl("tan_",
TORCH_FN(wrapper_SparseCPU__tan_));
}
} // anonymous namespace
// Non-dispatched entry points: *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor tan(const at::Tensor & self) {
return wrapper_SparseCPU__tan(self);
}
at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_tan_out(self, out);
}
at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_tan_out(self, out);
}
at::Tensor & tan_(at::Tensor & self) {
return wrapper_SparseCPU__tan_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::tanh (SparseCPU): forwards to the native sparse kernel.
// No device check / DeviceGuard emitted by torchgen for this section.
at::Tensor wrapper_SparseCPU__tanh(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse(self);
}
} // anonymous namespace
namespace {
// aten::tanh.out (SparseCPU): writes into `out` and returns it.
at::Tensor & wrapper_SparseCPU_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_out(self, out);
}
} // anonymous namespace
namespace {
// aten::tanh_ (SparseCPU): in-place variant; mutates and returns `self`.
at::Tensor & wrapper_SparseCPU__tanh_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_(self);
}
} // anonymous namespace
// Dispatcher registration of the three tanh overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("tanh",
TORCH_FN(wrapper_SparseCPU__tanh));
m.impl("tanh.out",
TORCH_FN(wrapper_SparseCPU_out_tanh_out));
m.impl("tanh_",
TORCH_FN(wrapper_SparseCPU__tanh_));
}
} // anonymous namespace
// Non-dispatched entry points: *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor tanh(const at::Tensor & self) {
return wrapper_SparseCPU__tanh(self);
}
at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_SparseCPU__tanh_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::threshold_backward (SparseCPU): gradient of threshold for sparse
// inputs. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
    // No device check
  // DeviceGuard omitted
  return at::native::threshold_backward_sparse(grad_output, self, threshold);
}
} // anonymous namespace
namespace {
// aten::threshold_backward.grad_input (SparseCPU): out-variant writing into
// `grad_input` and returning it.
at::Tensor & wrapper_SparseCPU_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::threshold_backward_sparse_out(grad_output, self, threshold, grad_input);
}
} // anonymous namespace
// Dispatcher registration of both overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("threshold_backward",
TORCH_FN(wrapper_SparseCPU__threshold_backward));
m.impl("threshold_backward.grad_input",
TORCH_FN(wrapper_SparseCPU_grad_input_threshold_backward_out));
}
} // anonymous namespace
// Non-dispatched entry points: *_out takes `grad_input` first, *_outf last.
namespace sparsecpu {
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCPU__threshold_backward(grad_output, self, threshold);
}
at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_SparseCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::trunc (SparseCPU): forwards to the native sparse kernel.
// No device check / DeviceGuard emitted by torchgen for this section.
at::Tensor wrapper_SparseCPU__trunc(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse(self);
}
} // anonymous namespace
namespace {
// aten::trunc.out (SparseCPU): writes into `out` and returns it.
at::Tensor & wrapper_SparseCPU_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_out(self, out);
}
} // anonymous namespace
namespace {
// aten::trunc_ (SparseCPU): in-place variant; mutates and returns `self`.
at::Tensor & wrapper_SparseCPU__trunc_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_(self);
}
} // anonymous namespace
// Dispatcher registration of the three trunc overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("trunc",
TORCH_FN(wrapper_SparseCPU__trunc));
m.impl("trunc.out",
TORCH_FN(wrapper_SparseCPU_out_trunc_out));
m.impl("trunc_",
TORCH_FN(wrapper_SparseCPU__trunc_));
}
} // anonymous namespace
// Non-dispatched entry points: *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor trunc(const at::Tensor & self) {
return wrapper_SparseCPU__trunc(self);
}
at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_SparseCPU__trunc_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::unsqueeze (SparseCPU): inserts a dimension at `dim` via the sparse
// kernel. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU__unsqueeze(const at::Tensor & self, int64_t dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::unsqueeze_sparse(self, dim);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("unsqueeze",
TORCH_FN(wrapper_SparseCPU__unsqueeze));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::unsqueeze.
namespace sparsecpu {
at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) {
return wrapper_SparseCPU__unsqueeze(self, dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::zeros.out (SparseCPU). The wrapper takes symbolic sizes; the
// C10_AS_INTARRAYREF_SLOW macro materializes them as concrete int64s (the
// "slow" path) before calling the native kernel.
at::Tensor & wrapper_SparseCPU_out_zeros_out(c10::SymIntArrayRef size, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::zeros_sparse_out(C10_AS_INTARRAYREF_SLOW(size), out);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("zeros.out",
TORCH_FN(wrapper_SparseCPU_out_zeros_out));
}
} // anonymous namespace
// Non-dispatched entry points. The IntArrayRef overloads convert concrete
// sizes to SymInt via fromIntArrayRefSlow; the *_symint_* overloads pass the
// symbolic sizes straight through. *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
return wrapper_SparseCPU_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
return wrapper_SparseCPU_out_zeros_out(c10::fromIntArrayRefSlow(size), out);
}
at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
return wrapper_SparseCPU_out_zeros_out(size, out);
}
at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
return wrapper_SparseCPU_out_zeros_out(size, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::native_norm (SparseCPU): p-norm of a sparse tensor.
// No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU__native_norm(const at::Tensor & self, const at::Scalar & p) {
    // No device check
  // DeviceGuard omitted
  return at::native::norm_sparse(self, p);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("native_norm",
TORCH_FN(wrapper_SparseCPU__native_norm));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::native_norm.
namespace sparsecpu {
at::Tensor native_norm(const at::Tensor & self, const at::Scalar & p) {
return wrapper_SparseCPU__native_norm(self, p);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::native_norm.ScalarOpt_dim_dtype (SparseCPU): dim-wise p-norm with
// optional dtype; dispatches to the overloaded at::native::norm_sparse.
at::Tensor wrapper_SparseCPU_ScalarOpt_dim_dtype_native_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::norm_sparse(self, p, dim, keepdim, dtype);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("native_norm.ScalarOpt_dim_dtype",
TORCH_FN(wrapper_SparseCPU_ScalarOpt_dim_dtype_native_norm));
}
} // anonymous namespace
// Non-dispatched entry point; overloads at::sparsecpu::native_norm by arity.
namespace sparsecpu {
at::Tensor native_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_SparseCPU_ScalarOpt_dim_dtype_native_norm(self, p, dim, keepdim, dtype);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_sum_backward (SparseCPU): backward of _sparse_sum over `dim`.
// No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_sum_backward_cpu(grad, self, dim);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_sum_backward",
TORCH_FN(wrapper_SparseCPU___sparse_sum_backward));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::_sparse_sum_backward.
namespace sparsecpu {
at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
return wrapper_SparseCPU___sparse_sum_backward(grad, self, dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_softmax (SparseCPU): softmax along `dim` for sparse tensors.
// No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // No device check
  // DeviceGuard omitted
  return at::native::softmax_sparse_cpu(self, dim, half_to_float);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_softmax",
TORCH_FN(wrapper_SparseCPU___sparse_softmax));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::_sparse_softmax.
namespace sparsecpu {
at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_SparseCPU___sparse_softmax(self, dim, half_to_float);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_softmax_backward_data (SparseCPU): backward of sparse
// softmax. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::softmax_backward_sparse_cpu(grad_output, output, dim, self);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_softmax_backward_data",
TORCH_FN(wrapper_SparseCPU___sparse_softmax_backward_data));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::_sparse_softmax_backward_data.
namespace sparsecpu {
at::Tensor _sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
return wrapper_SparseCPU___sparse_softmax_backward_data(grad_output, output, dim, self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_log_softmax (SparseCPU): log-softmax along `dim` for sparse
// tensors. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // No device check
  // DeviceGuard omitted
  return at::native::log_softmax_sparse_cpu(self, dim, half_to_float);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_log_softmax",
TORCH_FN(wrapper_SparseCPU___sparse_log_softmax));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::_sparse_log_softmax.
namespace sparsecpu {
at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
return wrapper_SparseCPU___sparse_log_softmax(self, dim, half_to_float);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_log_softmax_backward_data (SparseCPU): backward of sparse
// log-softmax. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::log_softmax_backward_sparse_cpu(grad_output, output, dim, self);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_log_softmax_backward_data",
TORCH_FN(wrapper_SparseCPU___sparse_log_softmax_backward_data));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::_sparse_log_softmax_backward_data.
namespace sparsecpu {
at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
return wrapper_SparseCPU___sparse_log_softmax_backward_data(grad_output, output, dim, self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::norm.ScalarOpt_dim_dtype (SparseCPU): dim-wise p-norm with a required
// result dtype. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU_ScalarOpt_dim_dtype_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_dtype_norm(self, p, dim, keepdim, dtype);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("norm.ScalarOpt_dim_dtype",
TORCH_FN(wrapper_SparseCPU_ScalarOpt_dim_dtype_norm));
}
} // anonymous namespace
// Non-dispatched entry point; overloads at::sparsecpu::norm on dtype arg.
namespace sparsecpu {
at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
return wrapper_SparseCPU_ScalarOpt_dim_dtype_norm(self, p, dim, keepdim, dtype);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::norm.ScalarOpt_dim (SparseCPU): dim-wise p-norm without dtype.
// No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU_ScalarOpt_dim_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_norm(self, p, dim, keepdim);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("norm.ScalarOpt_dim",
TORCH_FN(wrapper_SparseCPU_ScalarOpt_dim_norm));
}
} // anonymous namespace
// Non-dispatched entry point; overloads at::sparsecpu::norm by arity.
namespace sparsecpu {
at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
return wrapper_SparseCPU_ScalarOpt_dim_norm(self, p, dim, keepdim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::clone (SparseCPU): deep copy of a sparse tensor; `memory_format` is
// forwarded to the native kernel. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU__clone(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
    // No device check
  // DeviceGuard omitted
  return at::native::clone_sparse(self, memory_format);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("clone",
TORCH_FN(wrapper_SparseCPU__clone));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::clone.
namespace sparsecpu {
at::Tensor clone(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
return wrapper_SparseCPU__clone(self, memory_format);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::resize_as_sparse_ (SparseCPU). Trailing underscore marks this as an
// in-place op per ATen convention; the `const Tensor &` signature follows the
// schema (the native kernel performs the mutation and returns `self`).
const at::Tensor & wrapper_SparseCPU__resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
    // No device check
  // DeviceGuard omitted
  return at::native::resize_as_sparse_(self, the_template);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("resize_as_sparse_",
TORCH_FN(wrapper_SparseCPU__resize_as_sparse_));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::resize_as_sparse_.
namespace sparsecpu {
const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
return wrapper_SparseCPU__resize_as_sparse_(self, the_template);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::zero_ (SparseCPU): in-place zeroing of a sparse tensor; returns
// `self`. No device check / DeviceGuard emitted.
at::Tensor & wrapper_SparseCPU__zero_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::zero_sparse_(self);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("zero_",
TORCH_FN(wrapper_SparseCPU__zero_));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::zero_.
namespace sparsecpu {
at::Tensor & zero_(at::Tensor & self) {
return wrapper_SparseCPU__zero_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::sub.Tensor (SparseCPU): self - alpha * other for sparse operands.
// No device check / DeviceGuard emitted for this section.
at::Tensor wrapper_SparseCPU_Tensor_sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // No device check
  // DeviceGuard omitted
  return at::native::sub_sparse(self, other, alpha);
}
} // anonymous namespace
namespace {
// aten::sub.out (SparseCPU): writes into `out` and returns it.
at::Tensor & wrapper_SparseCPU_out_sub_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sub_out_sparse(self, other, alpha, out);
}
} // anonymous namespace
namespace {
// aten::sub_.Tensor (SparseCPU): in-place variant; mutates and returns `self`.
at::Tensor & wrapper_SparseCPU_Tensor_sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // No device check
  // DeviceGuard omitted
  return at::native::sub_sparse_(self, other, alpha);
}
} // anonymous namespace
// Dispatcher registration of the three sub overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sub.Tensor",
TORCH_FN(wrapper_SparseCPU_Tensor_sub));
m.impl("sub.out",
TORCH_FN(wrapper_SparseCPU_out_sub_out));
m.impl("sub_.Tensor",
TORCH_FN(wrapper_SparseCPU_Tensor_sub_));
}
} // anonymous namespace
// Non-dispatched entry points: *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_SparseCPU_Tensor_sub(self, other, alpha);
}
at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_SparseCPU_out_sub_out(self, other, alpha, out);
}
at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCPU_out_sub_out(self, other, alpha, out);
}
at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
return wrapper_SparseCPU_Tensor_sub_(self, other, alpha);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::addmm (SparseCPU): beta * self + alpha * (mat1 @ mat2) with a sparse
// matrix operand, via the CPU sparse-dense kernel. No device check /
// DeviceGuard emitted for this section.
at::Tensor wrapper_SparseCPU__addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    // No device check
  // DeviceGuard omitted
  return at::native::addmm_sparse_dense_cpu(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
namespace {
// aten::addmm.out (SparseCPU): writes into `out` and returns it.
at::Tensor & wrapper_SparseCPU_out_addmm_out(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::addmm_out_sparse_dense_cpu(self, mat1, mat2, beta, alpha, out);
}
} // anonymous namespace
namespace {
// aten::addmm_ (SparseCPU): in-place variant; note it routes to the s_
// (same-shape) native kernel rather than a broadcasting one.
at::Tensor & wrapper_SparseCPU__addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    // No device check
  // DeviceGuard omitted
  return at::native::s_addmm_sparse_dense_cpu_(self, mat1, mat2, beta, alpha);
}
} // anonymous namespace
// Dispatcher registration of the three addmm overloads under SparseCPU.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("addmm",
TORCH_FN(wrapper_SparseCPU__addmm));
m.impl("addmm.out",
TORCH_FN(wrapper_SparseCPU_out_addmm_out));
m.impl("addmm_",
TORCH_FN(wrapper_SparseCPU__addmm_));
}
} // anonymous namespace
// Non-dispatched entry points: *_out is out-first, *_outf is out-last.
namespace sparsecpu {
at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCPU__addmm(self, mat1, mat2, beta, alpha);
}
at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
return wrapper_SparseCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}
at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
return wrapper_SparseCPU__addmm_(self, mat1, mat2, beta, alpha);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_coo_tensor_with_dims (SparseCPU): constructs an empty sparse
// COO tensor with the given sparse/dense dim split and size, using the
// unpacked TensorOptions fields. No device check / DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // No device check
  // DeviceGuard omitted
  return at::native::new_with_dims_sparse(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_coo_tensor_with_dims",
TORCH_FN(wrapper_SparseCPU___sparse_coo_tensor_with_dims));
}
} // anonymous namespace
// Non-dispatched entry points: the TensorOptions overload unpacks the options
// into the individual optional fields expected by the wrapper.
namespace sparsecpu {
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
return wrapper_SparseCPU___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
return wrapper_SparseCPU___sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::_sparse_coo_tensor_with_dims_and_tensors (SparseCPU): constructs a
// sparse COO tensor from explicit indices/values; takes symbolic sizes and
// forwards to the symint-aware native constructor. No device check /
// DeviceGuard emitted.
at::Tensor wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    // No device check
  // DeviceGuard omitted
  return at::native::new_with_dims_and_tensor_sparse_symint(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_coo_tensor_with_dims_and_tensors",
TORCH_FN(wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors));
}
} // anonymous namespace
// Non-dispatched entry points. IntArrayRef overloads convert sizes to SymInt
// via fromIntArrayRefSlow; *_symint overloads pass them through. TensorOptions
// overloads unpack the options into the individual optional fields.
namespace sparsecpu {
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced) {
return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory, is_coalesced);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced) {
return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced);
}
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
return wrapper_SparseCPU___sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// aten::sparse_resize_ (SparseCPU). Trailing underscore marks an in-place op
// per ATen convention; the `const Tensor &` signature follows the schema and
// the native kernel performs the resize, returning `self`.
const at::Tensor & wrapper_SparseCPU__sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_resize_(self, size, sparse_dim, dense_dim);
}
} // anonymous namespace
// Dispatcher registration under the SparseCPU key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sparse_resize_",
TORCH_FN(wrapper_SparseCPU__sparse_resize_));
}
} // anonymous namespace
// Non-dispatched entry point mirroring at::sparse_resize_.
namespace sparsecpu {
const at::Tensor & sparse_resize_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_SparseCPU__sparse_resize_(self, size, sparse_dim, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::sparse_resize_and_clear_: forwards
// straight to the native kernel.
const at::Tensor & wrapper_SparseCPU__sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sparse_resize_and_clear_",
TORCH_FN(wrapper_SparseCPU__sparse_resize_and_clear_));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
return wrapper_SparseCPU__sparse_resize_and_clear_(self, size, sparse_dim, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::sparse_mask: forwards straight to the
// native kernel.
at::Tensor wrapper_SparseCPU__sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_mask(self, mask);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sparse_mask",
TORCH_FN(wrapper_SparseCPU__sparse_mask));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor sparse_mask(const at::Tensor & self, const at::Tensor & mask) {
return wrapper_SparseCPU__sparse_mask(self, mask);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_sparse_mask_projection: forwards
// straight to the native kernel.
at::Tensor wrapper_SparseCPU___sparse_mask_projection(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_mask_projection(self, mask, accumulate_matches);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_sparse_mask_projection",
TORCH_FN(wrapper_SparseCPU___sparse_mask_projection));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _sparse_mask_projection(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
return wrapper_SparseCPU___sparse_mask_projection(self, mask, accumulate_matches);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_to_dense; note the native kernel is
// named sparse_to_dense.
at::Tensor wrapper_SparseCPU___to_dense(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_to_dense(self, dtype, masked_grad);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_dense",
TORCH_FN(wrapper_SparseCPU___to_dense));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_dense(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
return wrapper_SparseCPU___to_dense(self, dtype, masked_grad);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::sparse_dim: forwards to the
// sparse_dim_sparse native kernel.
int64_t wrapper_SparseCPU__sparse_dim(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_dim_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sparse_dim",
TORCH_FN(wrapper_SparseCPU__sparse_dim));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
int64_t sparse_dim(const at::Tensor & self) {
return wrapper_SparseCPU__sparse_dim(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_dimI. Intentionally shares the same
// native kernel (sparse_dim_sparse) as aten::sparse_dim above.
int64_t wrapper_SparseCPU___dimI(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_dim_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_dimI",
TORCH_FN(wrapper_SparseCPU___dimI));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
int64_t _dimI(const at::Tensor & self) {
return wrapper_SparseCPU___dimI(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::dense_dim: forwards to the
// dense_dim_sparse native kernel.
int64_t wrapper_SparseCPU__dense_dim(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::dense_dim_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("dense_dim",
TORCH_FN(wrapper_SparseCPU__dense_dim));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
int64_t dense_dim(const at::Tensor & self) {
return wrapper_SparseCPU__dense_dim(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_dimV. Intentionally shares the same
// native kernel (dense_dim_sparse) as aten::dense_dim above.
int64_t wrapper_SparseCPU___dimV(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::dense_dim_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_dimV",
TORCH_FN(wrapper_SparseCPU___dimV));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
int64_t _dimV(const at::Tensor & self) {
return wrapper_SparseCPU___dimV(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_nnz: forwards to the _nnz_sparse
// native kernel.
int64_t wrapper_SparseCPU___nnz(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::_nnz_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_nnz",
TORCH_FN(wrapper_SparseCPU___nnz));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
int64_t _nnz(const at::Tensor & self) {
return wrapper_SparseCPU___nnz(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_coalesce: forwards to the
// CPU-specific _coalesce_sparse_cpu kernel.
at::Tensor wrapper_SparseCPU___coalesce(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::_coalesce_sparse_cpu(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_coalesce",
TORCH_FN(wrapper_SparseCPU___coalesce));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _coalesce(const at::Tensor & self) {
return wrapper_SparseCPU___coalesce(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::is_coalesced: forwards to the
// is_coalesced_sparse native kernel.
bool wrapper_SparseCPU__is_coalesced(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::is_coalesced_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("is_coalesced",
TORCH_FN(wrapper_SparseCPU__is_coalesced));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
bool is_coalesced(const at::Tensor & self) {
return wrapper_SparseCPU__is_coalesced(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_indices: forwards to the
// _indices_sparse native kernel.
at::Tensor wrapper_SparseCPU___indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::_indices_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_indices",
TORCH_FN(wrapper_SparseCPU___indices));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _indices(const at::Tensor & self) {
return wrapper_SparseCPU___indices(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_values: forwards to the
// _values_sparse native kernel.
at::Tensor wrapper_SparseCPU___values(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::_values_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_values",
TORCH_FN(wrapper_SparseCPU___values));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _values(const at::Tensor & self) {
return wrapper_SparseCPU___values(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for the in-place aten::_coalesced_: forwards to
// the _coalesced_sparse_ native kernel.
at::Tensor & wrapper_SparseCPU___coalesced_(at::Tensor & self, bool coalesced) {
    // No device check
  // DeviceGuard omitted
  return at::native::_coalesced_sparse_(self, coalesced);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_coalesced_",
TORCH_FN(wrapper_SparseCPU___coalesced_));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor & _coalesced_(at::Tensor & self, bool coalesced) {
return wrapper_SparseCPU___coalesced_(self, coalesced);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::indices: forwards to the
// indices_sparse native kernel (distinct from _indices above).
at::Tensor wrapper_SparseCPU__indices(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::indices_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("indices",
TORCH_FN(wrapper_SparseCPU__indices));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor indices(const at::Tensor & self) {
return wrapper_SparseCPU__indices(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::values: forwards to the values_sparse
// native kernel (distinct from _values above).
at::Tensor wrapper_SparseCPU__values(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::values_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("values",
TORCH_FN(wrapper_SparseCPU__values));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor values(const at::Tensor & self) {
return wrapper_SparseCPU__values(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::hspmm (functional variant).
at::Tensor wrapper_SparseCPU__hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
    // No device check
  // DeviceGuard omitted
  return at::native::hspmm_sparse_cpu(mat1, mat2);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::hspmm.out (out variant; `out` last,
// matching the op schema).
at::Tensor & wrapper_SparseCPU_out_hspmm_out(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::hspmm_out_sparse_cpu(mat1, mat2, out);
}
} // anonymous namespace
// Register both wrappers above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("hspmm",
TORCH_FN(wrapper_SparseCPU__hspmm));
m.impl("hspmm.out",
TORCH_FN(wrapper_SparseCPU_out_hspmm_out));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
return wrapper_SparseCPU__hspmm(mat1, mat2);
}
// Convenience signature: `out` first.
at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) {
return wrapper_SparseCPU_out_hspmm_out(mat1, mat2, out);
}
// Schema-order signature: `out` last.
at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_SparseCPU_out_hspmm_out(mat1, mat2, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for the in-place aten::copy_sparse_to_sparse_;
// note the native kernel is named copy_sparse_.
at::Tensor & wrapper_SparseCPU__copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    // No device check
  // DeviceGuard omitted
  return at::native::copy_sparse_(self, src, non_blocking);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("copy_sparse_to_sparse_",
TORCH_FN(wrapper_SparseCPU__copy_sparse_to_sparse_));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
return wrapper_SparseCPU__copy_sparse_to_sparse_(self, src, non_blocking);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for the aten::_to_sparse.sparse_dim overload;
// forwards to the sparse_coo_to_sparse native kernel.
at::Tensor wrapper_SparseCPU_sparse_dim__to_sparse(const at::Tensor & self, int64_t sparse_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_coo_to_sparse(self, sparse_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_sparse.sparse_dim",
TORCH_FN(wrapper_SparseCPU_sparse_dim__to_sparse));
}
} // anonymous namespace
// Backend-specific C++ entry point (sparse_dim overload): calls the SparseCPU
// wrapper directly, bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_sparse(const at::Tensor & self, int64_t sparse_dim) {
return wrapper_SparseCPU_sparse_dim__to_sparse(self, sparse_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for the default aten::_to_sparse overload
// (layout/blocksize/dense_dim); forwards to sparse_coo_to_sparse.
at::Tensor wrapper_SparseCPU___to_sparse(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::sparse_coo_to_sparse(self, layout, blocksize, dense_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_sparse",
TORCH_FN(wrapper_SparseCPU___to_sparse));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_sparse(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
return wrapper_SparseCPU___to_sparse(self, layout, blocksize, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_to_sparse_csr: forwards to the
// coo_to_sparse_csr native kernel.
at::Tensor wrapper_SparseCPU___to_sparse_csr(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::coo_to_sparse_csr(self, dense_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_sparse_csr",
TORCH_FN(wrapper_SparseCPU___to_sparse_csr));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_sparse_csr(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
return wrapper_SparseCPU___to_sparse_csr(self, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_to_sparse_csc: forwards to the
// coo_to_sparse_csc native kernel.
at::Tensor wrapper_SparseCPU___to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::coo_to_sparse_csc(self, dense_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_sparse_csc",
TORCH_FN(wrapper_SparseCPU___to_sparse_csc));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
return wrapper_SparseCPU___to_sparse_csc(self, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_to_sparse_bsr: forwards to the
// coo_to_sparse_bsr native kernel.
at::Tensor wrapper_SparseCPU___to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::coo_to_sparse_bsr(self, blocksize, dense_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_sparse_bsr",
TORCH_FN(wrapper_SparseCPU___to_sparse_bsr));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
return wrapper_SparseCPU___to_sparse_bsr(self, blocksize, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::_to_sparse_bsc: forwards to the
// coo_to_sparse_bsc native kernel.
at::Tensor wrapper_SparseCPU___to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    // No device check
  // DeviceGuard omitted
  return at::native::coo_to_sparse_bsc(self, blocksize, dense_dim);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("_to_sparse_bsc",
TORCH_FN(wrapper_SparseCPU___to_sparse_bsc));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor _to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
return wrapper_SparseCPU___to_sparse_bsc(self, blocksize, dense_dim);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::index_select: forwards to the
// CPU-specific index_select_sparse_cpu kernel.
at::Tensor wrapper_SparseCPU__index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
    // No device check
  // DeviceGuard omitted
  return at::native::index_select_sparse_cpu(self, dim, index);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("index_select",
TORCH_FN(wrapper_SparseCPU__index_select));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
return wrapper_SparseCPU__index_select(self, dim, index);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::erfinv (functional variant).
at::Tensor wrapper_SparseCPU__erfinv(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::erfinv_sparse(self);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::erfinv.out (out variant).
at::Tensor & wrapper_SparseCPU_out_erfinv_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::erfinv_sparse_out(self, out);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::erfinv_ (in-place variant).
at::Tensor & wrapper_SparseCPU__erfinv_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::erfinv_sparse_(self);
}
} // anonymous namespace
// Register all three variants above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("erfinv",
TORCH_FN(wrapper_SparseCPU__erfinv));
m.impl("erfinv.out",
TORCH_FN(wrapper_SparseCPU_out_erfinv_out));
m.impl("erfinv_",
TORCH_FN(wrapper_SparseCPU__erfinv_));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor erfinv(const at::Tensor & self) {
return wrapper_SparseCPU__erfinv(self);
}
// Convenience signature: `out` first.
at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_erfinv_out(self, out);
}
// Schema-order signature: `out` last.
at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_erfinv_out(self, out);
}
at::Tensor & erfinv_(at::Tensor & self) {
return wrapper_SparseCPU__erfinv_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::sign (functional variant).
at::Tensor wrapper_SparseCPU__sign(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sign_sparse(self);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::sign.out (out variant).
at::Tensor & wrapper_SparseCPU_out_sign_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sign_sparse_out(self, out);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::sign_ (in-place variant).
at::Tensor & wrapper_SparseCPU__sign_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sign_sparse_(self);
}
} // anonymous namespace
// Register all three variants above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("sign",
TORCH_FN(wrapper_SparseCPU__sign));
m.impl("sign.out",
TORCH_FN(wrapper_SparseCPU_out_sign_out));
m.impl("sign_",
TORCH_FN(wrapper_SparseCPU__sign_));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor sign(const at::Tensor & self) {
return wrapper_SparseCPU__sign(self);
}
// Convenience signature: `out` first.
at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_sign_out(self, out);
}
// Schema-order signature: `out` last.
at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_sign_out(self, out);
}
at::Tensor & sign_(at::Tensor & self) {
return wrapper_SparseCPU__sign_(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::signbit (functional variant).
at::Tensor wrapper_SparseCPU__signbit(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::signbit_sparse(self);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::signbit.out (out variant).
at::Tensor & wrapper_SparseCPU_out_signbit_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::signbit_sparse_out(self, out);
}
} // anonymous namespace
// Register both wrappers above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("signbit",
TORCH_FN(wrapper_SparseCPU__signbit));
m.impl("signbit.out",
TORCH_FN(wrapper_SparseCPU_out_signbit_out));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor signbit(const at::Tensor & self) {
return wrapper_SparseCPU__signbit(self);
}
// Convenience signature: `out` first.
at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_signbit_out(self, out);
}
// Schema-order signature: `out` last.
at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_signbit_out(self, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::any: forwards to the any_sparse
// native kernel.
at::Tensor wrapper_SparseCPU__any(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::any_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("any",
TORCH_FN(wrapper_SparseCPU__any));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor any(const at::Tensor & self) {
return wrapper_SparseCPU__any(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for the aten::pow.Tensor_Scalar overload
// (functional variant).
at::Tensor wrapper_SparseCPU_Tensor_Scalar_pow(const at::Tensor & self, const at::Scalar & exponent) {
    // No device check
  // DeviceGuard omitted
  return at::native::pow_sparse_scalar(self, exponent);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::pow.Tensor_Scalar_out (out variant).
at::Tensor & wrapper_SparseCPU_Tensor_Scalar_out_pow_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::pow_out_sparse_scalar(self, exponent, out);
}
} // anonymous namespace
// Register both wrappers above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("pow.Tensor_Scalar",
TORCH_FN(wrapper_SparseCPU_Tensor_Scalar_pow));
m.impl("pow.Tensor_Scalar_out",
TORCH_FN(wrapper_SparseCPU_Tensor_Scalar_out_pow_out));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_SparseCPU_Tensor_Scalar_pow(self, exponent);
}
// Convenience signature: `out` first.
at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
return wrapper_SparseCPU_Tensor_Scalar_out_pow_out(self, exponent, out);
}
// Schema-order signature: `out` last.
at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
return wrapper_SparseCPU_Tensor_Scalar_out_pow_out(self, exponent, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::isinf: forwards to the isinf_sparse
// native kernel.
at::Tensor wrapper_SparseCPU__isinf(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::isinf_sparse(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("isinf",
TORCH_FN(wrapper_SparseCPU__isinf));
}
} // anonymous namespace
// Backend-specific C++ entry point: calls the SparseCPU wrapper directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor isinf(const at::Tensor & self) {
return wrapper_SparseCPU__isinf(self);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::isposinf (functional variant).
at::Tensor wrapper_SparseCPU__isposinf(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::isposinf_sparse(self);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::isposinf.out (out variant).
at::Tensor & wrapper_SparseCPU_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::isposinf_sparse_out(self, out);
}
} // anonymous namespace
// Register both wrappers above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("isposinf",
TORCH_FN(wrapper_SparseCPU__isposinf));
m.impl("isposinf.out",
TORCH_FN(wrapper_SparseCPU_out_isposinf_out));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor isposinf(const at::Tensor & self) {
return wrapper_SparseCPU__isposinf(self);
}
// Convenience signature: `out` first.
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_isposinf_out(self, out);
}
// Schema-order signature: `out` last.
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_isposinf_out(self, out);
}
} // namespace sparsecpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// SparseCPU dispatch wrapper for aten::isneginf (functional variant).
at::Tensor wrapper_SparseCPU__isneginf(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::isneginf_sparse(self);
}
} // anonymous namespace
namespace {
// SparseCPU dispatch wrapper for aten::isneginf.out (out variant).
at::Tensor & wrapper_SparseCPU_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::isneginf_sparse_out(self, out);
}
} // anonymous namespace
// Register both wrappers above under the SparseCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCPU, m) {
    m.impl("isneginf",
TORCH_FN(wrapper_SparseCPU__isneginf));
m.impl("isneginf.out",
TORCH_FN(wrapper_SparseCPU_out_isneginf_out));
}
} // anonymous namespace
// Backend-specific C++ entry points: call the SparseCPU wrappers directly,
// bypassing the dispatcher.
namespace sparsecpu {
at::Tensor isneginf(const at::Tensor & self) {
return wrapper_SparseCPU__isneginf(self);
}
// Convenience signature: `out` first.
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCPU_out_isneginf_out(self, out);
}
// Schema-order signature: `out` last.
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCPU_out_isneginf_out(self, out);
}
} // namespace sparsecpu
} // namespace at
