// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// an external backend might generate file within its code tree
// and check all the source files within the tree with clang-format.
// so, disable it since the backend might have a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries, it
//       just excludes external projects such as torch_xla which
//       re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_XPU_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif

// @generated by torchgen/gen.py from RegisterDispatchKey.cpp

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>


#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/_copy_from.h>
#include <c10/macros/Macros.h>
#include <ATen/ops/_conj_physical_native.h>
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h>
#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/ccol_indices_native.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/col_indices_native.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/crow_indices_native.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/dense_dim_native.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/is_pinned_native.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/row_indices_native.h>
#include <ATen/ops/select_copy_native.h>
#include <ATen/ops/select_native.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sparse_dim_native.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_sampled_addmm_native.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/values_native.h>
#include <ATen/ops/zero_native.h>

namespace at {
namespace {
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")

// Validates that `out` matches the dtype/device requested in `options`,
// resizes it to `sizes`, and restrides it only when an actual resize took
// place. When no resize occurs the output keeps its preexisting strides and
// the (advisory) strides coming from the meta function are ignored.
void resize_out(const Tensor &out, IntArrayRef sizes, IntArrayRef strides, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == out.dtype(),
      "Expected out tensor to have dtype ", options.dtype(), ", but got ", out.dtype(), " instead");
  TORCH_CHECK(options.device() == out.device(),
      "Expected out tensor to have device ", options.device(), ", but got ", out.device(), " instead");
  const bool needs_restride = at::native::resize_output(out, sizes);
  if (!needs_restride) {
    // No resize happened; leave the tensor's existing strides untouched.
    return;
  }
  if (!strides.empty()) {
    TORCH_INTERNAL_ASSERT(!options.memory_format_opt().has_value());
    // TODO: avoid the redispatch here
    out.as_strided_(sizes, strides);
  } else if (options.memory_format_opt().has_value()) {
    out.unsafeGetTensorImpl()->empty_tensor_restride(*options.memory_format_opt());
  }
}

// Validates an in-place call: `self` must already have the dtype, device and
// sizes the operation expects. Needed by operators that:
//   1) don't use 'TensorIterator' (e.g. 'addmm' and 'baddbmm')
//   2) have particular typing rules (e.g. 'cumsum' and 'cumprod')
// For other operators (e.g. 'add'), 'TensorIterator' already performs these
// checks itself.
void check_inplace(const Tensor &self, IntArrayRef sizes, const TensorOptions &options) {
  TORCH_CHECK(options.dtype() == self.dtype(),
      "Bad in-place call: ",
      "input tensor dtype ", self.dtype(), " and output tensor dtype ", options.dtype(), " should match");
  TORCH_CHECK(options.device() == self.device(),
      "Bad in-place call: ",
      "input tensor device ", self.device(), " and output tensor device ", options.device(), " should match");
  TORCH_CHECK(sizes == self.sizes(),
      "Bad in-place call: ",
      "input tensor size ", self.sizes(), " and output tensor size ", sizes, " should match");
}
C10_DIAGNOSTIC_POP()
} // namespace
} // namespace at

// See template file RegisterDispatchDefinitions.ini
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__abs(const at::Tensor& self) {
  return at::native::abs_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_abs_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::abs_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__abs_(at::Tensor& self) {
  return at::native::abs_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("abs", TORCH_FN(wrapper_SparseCsrCPU__abs));
  m.impl("abs.out", TORCH_FN(wrapper_SparseCsrCPU_out_abs_out));
  m.impl("abs_", TORCH_FN(wrapper_SparseCsrCPU__abs_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor abs(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__abs(self);
}
at::Tensor& abs_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_abs_out(self, out);
}
at::Tensor& abs_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_abs_out(self, out);
}
at::Tensor& abs_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__abs_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__angle(const at::Tensor& self) {
  return at::native::angle_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_angle_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::angle_sparse_csr_out(self, out);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("angle", TORCH_FN(wrapper_SparseCsrCPU__angle));
  m.impl("angle.out", TORCH_FN(wrapper_SparseCsrCPU_out_angle_out));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor angle(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__angle(self);
}
at::Tensor& angle_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_angle_out(self, out);
}
at::Tensor& angle_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_angle_out(self, out);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__sgn(const at::Tensor& self) {
  return at::native::sgn_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_sgn_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::sgn_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__sgn_(at::Tensor& self) {
  return at::native::sgn_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("sgn", TORCH_FN(wrapper_SparseCsrCPU__sgn));
  m.impl("sgn.out", TORCH_FN(wrapper_SparseCsrCPU_out_sgn_out));
  m.impl("sgn_", TORCH_FN(wrapper_SparseCsrCPU__sgn_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor sgn(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__sgn(self);
}
at::Tensor& sgn_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_sgn_out(self, out);
}
at::Tensor& sgn_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_sgn_out(self, out);
}
at::Tensor& sgn_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__sgn_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU___conj_physical(const at::Tensor& self) {
  return at::native::conj_physical_sparse_csr(self);
}
} // anonymous namespace
// Register the wrapper above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_conj_physical", TORCH_FN(wrapper_SparseCsrCPU___conj_physical));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor _conj_physical(const at::Tensor& self) {
  return wrapper_SparseCsrCPU___conj_physical(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// out= variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor& wrapper_SparseCsrCPU_out_conj_physical_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::conj_physical_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__conj_physical_(at::Tensor& self) {
  return at::native::conj_physical_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("conj_physical.out", TORCH_FN(wrapper_SparseCsrCPU_out_conj_physical_out));
  m.impl("conj_physical_", TORCH_FN(wrapper_SparseCsrCPU__conj_physical_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor& conj_physical_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_conj_physical_out(self, out);
}
at::Tensor& conj_physical_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_conj_physical_out(self, out);
}
at::Tensor& conj_physical_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__conj_physical_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU_Tensor_add(const at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha) {
  return at::native::add_sparse_csr(self, other, alpha);
}
} // anonymous namespace
namespace {
// out= variant (routes to the sparse-compressed CPU kernel).
at::Tensor& wrapper_SparseCsrCPU_out_add_out(const at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha, at::Tensor& out) {
  return at::native::add_out_sparse_compressed_cpu(self, other, alpha, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU_Tensor_add_(at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha) {
  return at::native::add_sparse_csr_(self, other, alpha);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("add.Tensor", TORCH_FN(wrapper_SparseCsrCPU_Tensor_add));
  m.impl("add.out", TORCH_FN(wrapper_SparseCsrCPU_out_add_out));
  m.impl("add_.Tensor", TORCH_FN(wrapper_SparseCsrCPU_Tensor_add_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor add(const at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU_Tensor_add(self, other, alpha);
}
at::Tensor& add_out(at::Tensor& out, const at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU_out_add_out(self, other, alpha, out);
}
at::Tensor& add_outf(const at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_add_out(self, other, alpha, out);
}
at::Tensor& add_(at::Tensor& self, const at::Tensor& other, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU_Tensor_add_(self, other, alpha);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// out= variant: forwards to the sparse-compressed kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor& wrapper_SparseCsrCPU_out_addmv_out(const at::Tensor& self, const at::Tensor& mat, const at::Tensor& vec, const at::Scalar& beta, const at::Scalar& alpha, at::Tensor& out) {
  return at::native::addmv_out_sparse_compressed(self, mat, vec, beta, alpha, out);
}
} // anonymous namespace
// Register the wrapper above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("addmv.out", TORCH_FN(wrapper_SparseCsrCPU_out_addmv_out));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor& addmv_out(at::Tensor& out, const at::Tensor& self, const at::Tensor& mat, const at::Tensor& vec, const at::Scalar& beta, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU_out_addmv_out(self, mat, vec, beta, alpha, out);
}
at::Tensor& addmv_outf(const at::Tensor& self, const at::Tensor& mat, const at::Tensor& vec, const at::Scalar& beta, const at::Scalar& alpha, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_addmv_out(self, mat, vec, beta, alpha, out);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__asinh(const at::Tensor& self) {
  return at::native::asinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_asinh_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::asinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__asinh_(at::Tensor& self) {
  return at::native::asinh_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("asinh", TORCH_FN(wrapper_SparseCsrCPU__asinh));
  m.impl("asinh.out", TORCH_FN(wrapper_SparseCsrCPU_out_asinh_out));
  m.impl("asinh_", TORCH_FN(wrapper_SparseCsrCPU__asinh_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor asinh(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__asinh(self);
}
at::Tensor& asinh_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_asinh_out(self, out);
}
at::Tensor& asinh_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_asinh_out(self, out);
}
at::Tensor& asinh_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__asinh_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__atanh(const at::Tensor& self) {
  return at::native::atanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_atanh_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::atanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__atanh_(at::Tensor& self) {
  return at::native::atanh_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("atanh", TORCH_FN(wrapper_SparseCsrCPU__atanh));
  m.impl("atanh.out", TORCH_FN(wrapper_SparseCsrCPU_out_atanh_out));
  m.impl("atanh_", TORCH_FN(wrapper_SparseCsrCPU__atanh_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor atanh(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__atanh(self);
}
at::Tensor& atanh_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_atanh_out(self, out);
}
at::Tensor& atanh_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_atanh_out(self, out);
}
at::Tensor& atanh_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__atanh_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__asin(const at::Tensor& self) {
  return at::native::asin_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_asin_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::asin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__asin_(at::Tensor& self) {
  return at::native::asin_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("asin", TORCH_FN(wrapper_SparseCsrCPU__asin));
  m.impl("asin.out", TORCH_FN(wrapper_SparseCsrCPU_out_asin_out));
  m.impl("asin_", TORCH_FN(wrapper_SparseCsrCPU__asin_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor asin(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__asin(self);
}
at::Tensor& asin_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_asin_out(self, out);
}
at::Tensor& asin_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_asin_out(self, out);
}
at::Tensor& asin_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__asin_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__atan(const at::Tensor& self) {
  return at::native::atan_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_atan_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::atan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__atan_(at::Tensor& self) {
  return at::native::atan_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("atan", TORCH_FN(wrapper_SparseCsrCPU__atan));
  m.impl("atan.out", TORCH_FN(wrapper_SparseCsrCPU_out_atan_out));
  m.impl("atan_", TORCH_FN(wrapper_SparseCsrCPU__atan_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor atan(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__atan(self);
}
at::Tensor& atan_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_atan_out(self, out);
}
at::Tensor& atan_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_atan_out(self, out);
}
at::Tensor& atan_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__atan_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__ceil(const at::Tensor& self) {
  return at::native::ceil_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_ceil_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::ceil_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__ceil_(at::Tensor& self) {
  return at::native::ceil_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("ceil", TORCH_FN(wrapper_SparseCsrCPU__ceil));
  m.impl("ceil.out", TORCH_FN(wrapper_SparseCsrCPU_out_ceil_out));
  m.impl("ceil_", TORCH_FN(wrapper_SparseCsrCPU__ceil_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor ceil(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__ceil(self);
}
at::Tensor& ceil_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_ceil_out(self, out);
}
at::Tensor& ceil_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_ceil_out(self, out);
}
at::Tensor& ceil_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__ceil_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// In-place copy: forwards to the sparse-compressed kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor& wrapper_SparseCsrCPU__copy_(at::Tensor& self, const at::Tensor& src, bool non_blocking) {
  return at::native::copy_sparse_compressed_(self, src, non_blocking);
}
} // anonymous namespace
// Register the wrapper above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("copy_", TORCH_FN(wrapper_SparseCsrCPU__copy_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor& copy_(at::Tensor& self, const at::Tensor& src, bool non_blocking) {
  return wrapper_SparseCsrCPU__copy_(self, src, non_blocking);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Factory: builds a sparse-compressed tensor. The SymInt sizes are lowered
// to a plain IntArrayRef before reaching the native kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU_memory_format_empty(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::empty_sparse_compressed(C10_AS_INTARRAYREF_SLOW(size), dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
// Register the wrapper above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("empty.memory_format", TORCH_FN(wrapper_SparseCsrCPU_memory_format_empty));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
// TensorOptions overloads unpack into the scattered optional arguments.
namespace sparsecsrcpu {
at::Tensor empty(at::IntArrayRef size, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU_memory_format_empty(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
}
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU_memory_format_empty(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU_memory_format_empty(size, dtype, layout, device, pin_memory, memory_format);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// In-place resize for sparse-CSR tensors; SymInt sizes are lowered before
// reaching the native kernel. No device check; DeviceGuard omitted (per the
// generated registration).
const at::Tensor& wrapper_SparseCsrCPU__resize_(const at::Tensor& self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::resize_sparse_csr_(self, C10_AS_INTARRAYREF_SLOW(size), memory_format);
}
} // anonymous namespace
// Register the wrapper above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("resize_", TORCH_FN(wrapper_SparseCsrCPU__resize_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
const at::Tensor& resize_(const at::Tensor& self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU__resize_(self, c10::fromIntArrayRefSlow(size), memory_format);
}
const at::Tensor& resize__symint(const at::Tensor& self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU__resize_(self, size, memory_format);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Factory: empty tensor shaped like `self`, via the sparse-CSR kernel.
// No device check; DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__empty_like(const at::Tensor& self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::empty_like_sparse_csr(self, dtype, layout, device, pin_memory, memory_format);
}
} // anonymous namespace
// Register the wrapper above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("empty_like", TORCH_FN(wrapper_SparseCsrCPU__empty_like));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
// The TensorOptions overload unpacks into the scattered optional arguments.
namespace sparsecsrcpu {
at::Tensor empty_like(const at::Tensor& self, at::TensorOptions options, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU__empty_like(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
}
at::Tensor empty_like(const at::Tensor& self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU__empty_like(self, dtype, layout, device, pin_memory, memory_format);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__erf(const at::Tensor& self) {
  return at::native::erf_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_erf_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::erf_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__erf_(at::Tensor& self) {
  return at::native::erf_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("erf", TORCH_FN(wrapper_SparseCsrCPU__erf));
  m.impl("erf.out", TORCH_FN(wrapper_SparseCsrCPU_out_erf_out));
  m.impl("erf_", TORCH_FN(wrapper_SparseCsrCPU__erf_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor erf(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__erf(self);
}
at::Tensor& erf_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_erf_out(self, out);
}
at::Tensor& erf_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_erf_out(self, out);
}
at::Tensor& erf_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__erf_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must live in an anonymous namespace to avoid
// ambiguity with identifiers that may already exist in namespace at.
namespace {
namespace {
// Functional variant: forwards to the sparse-CSR kernel. No device check;
// DeviceGuard omitted (per the generated registration).
at::Tensor wrapper_SparseCsrCPU__expm1(const at::Tensor& self) {
  return at::native::expm1_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor& wrapper_SparseCsrCPU_out_expm1_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::expm1_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor& wrapper_SparseCsrCPU__expm1_(at::Tensor& self) {
  return at::native::expm1_sparse_csr_(self);
}
} // anonymous namespace
// Register the wrappers above under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("expm1", TORCH_FN(wrapper_SparseCsrCPU__expm1));
  m.impl("expm1.out", TORCH_FN(wrapper_SparseCsrCPU_out_expm1_out));
  m.impl("expm1_", TORCH_FN(wrapper_SparseCsrCPU__expm1_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; see RegisterDispatchDefinitions.ini.
namespace sparsecsrcpu {
at::Tensor expm1(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__expm1(self);
}
at::Tensor& expm1_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_expm1_out(self, out);
}
at::Tensor& expm1_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_expm1_out(self, out);
}
at::Tensor& expm1_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__expm1_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::fill_.Scalar for SparseCsrCPU: in-place fill of a sparse CSR
// tensor with a scalar value.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Mutates `self` in place and returns it.
at::Tensor & wrapper_SparseCsrCPU_Scalar_fill_(at::Tensor & self, const at::Scalar & value) {
    // No device check
  // DeviceGuard omitted
  return at::native::fill_sparse_csr_(self, value);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("fill_.Scalar",
TORCH_FN(wrapper_SparseCsrCPU_Scalar_fill_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point.
namespace sparsecsrcpu {
at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
return wrapper_SparseCsrCPU_Scalar_fill_(self, value);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::floor for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__floor(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_floor_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__floor_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::floor_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("floor",
TORCH_FN(wrapper_SparseCsrCPU__floor));
m.impl("floor.out",
TORCH_FN(wrapper_SparseCsrCPU_out_floor_out));
m.impl("floor_",
TORCH_FN(wrapper_SparseCsrCPU__floor_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor floor(const at::Tensor & self) {
return wrapper_SparseCsrCPU__floor(self);
}
at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_floor_out(self, out);
}
at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_floor_out(self, out);
}
at::Tensor & floor_(at::Tensor & self) {
return wrapper_SparseCsrCPU__floor_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::frac for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__frac(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_frac_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__frac_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::frac_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("frac",
TORCH_FN(wrapper_SparseCsrCPU__frac));
m.impl("frac.out",
TORCH_FN(wrapper_SparseCsrCPU_out_frac_out));
m.impl("frac_",
TORCH_FN(wrapper_SparseCsrCPU__frac_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor frac(const at::Tensor & self) {
return wrapper_SparseCsrCPU__frac(self);
}
at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_frac_out(self, out);
}
at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_frac_out(self, out);
}
at::Tensor & frac_(at::Tensor & self) {
return wrapper_SparseCsrCPU__frac_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::isnan for SparseCsrCPU: functional variant only.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Returns a new tensor; input is not modified.
at::Tensor wrapper_SparseCsrCPU__isnan(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::isnan_sparse_csr(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("isnan",
TORCH_FN(wrapper_SparseCsrCPU__isnan));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point.
namespace sparsecsrcpu {
at::Tensor isnan(const at::Tensor & self) {
return wrapper_SparseCsrCPU__isnan(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::log1p for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__log1p(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_log1p_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__log1p_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::log1p_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("log1p",
TORCH_FN(wrapper_SparseCsrCPU__log1p));
m.impl("log1p.out",
TORCH_FN(wrapper_SparseCsrCPU_out_log1p_out));
m.impl("log1p_",
TORCH_FN(wrapper_SparseCsrCPU__log1p_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor log1p(const at::Tensor & self) {
return wrapper_SparseCsrCPU__log1p(self);
}
at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_log1p_out(self, out);
}
at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_log1p_out(self, out);
}
at::Tensor & log1p_(at::Tensor & self) {
return wrapper_SparseCsrCPU__log1p_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::mm for SparseCsrCPU: matrix multiply routed to the sparse-CSR
// implementation; functional and out= variants.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__mm(const at::Tensor & self, const at::Tensor & mat2) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_mm(self, mat2);
}
} // anonymous namespace
namespace {
// out= variant: writes into `out` and returns it.
at::Tensor & wrapper_SparseCsrCPU_out_mm_out(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::_sparse_csr_mm_out(self, mat2, out);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("mm",
TORCH_FN(wrapper_SparseCsrCPU__mm));
m.impl("mm.out",
TORCH_FN(wrapper_SparseCsrCPU_out_mm_out));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCPU__mm(self, mat2);
}
at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
return wrapper_SparseCsrCPU_out_mm_out(self, mat2, out);
}
at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_mm_out(self, mat2, out);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::mul.Tensor for SparseCsrCPU: tensor-tensor multiply; functional,
// out=, and in-place variants.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU_Tensor_mul(const at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::mul_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_mul_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::mul_out_sparse_csr(self, other, out);
}
} // anonymous namespace
namespace {
// In-place variant: mutates and returns `self`.
at::Tensor & wrapper_SparseCsrCPU_Tensor_mul_(at::Tensor & self, const at::Tensor & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::mul_sparse_csr_(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("mul.Tensor",
TORCH_FN(wrapper_SparseCsrCPU_Tensor_mul));
m.impl("mul.out",
TORCH_FN(wrapper_SparseCsrCPU_out_mul_out));
m.impl("mul_.Tensor",
TORCH_FN(wrapper_SparseCsrCPU_Tensor_mul_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCPU_Tensor_mul(self, other);
}
at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCPU_out_mul_out(self, other, out);
}
at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_mul_out(self, other, out);
}
at::Tensor & mul_(at::Tensor & self, const at::Tensor & other) {
return wrapper_SparseCsrCPU_Tensor_mul_(self, other);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::mul.Scalar for SparseCsrCPU: tensor-scalar multiply; functional
// and in-place variants (no out= overload is generated here).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU_Scalar_mul(const at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::mul_scalar_sparse_csr(self, other);
}
} // anonymous namespace
namespace {
// In-place variant: mutates and returns `self`.
at::Tensor & wrapper_SparseCsrCPU_Scalar_mul_(at::Tensor & self, const at::Scalar & other) {
    // No device check
  // DeviceGuard omitted
  return at::native::mul__scalar_sparse_csr(self, other);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("mul.Scalar",
TORCH_FN(wrapper_SparseCsrCPU_Scalar_mul));
m.impl("mul_.Scalar",
TORCH_FN(wrapper_SparseCsrCPU_Scalar_mul_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points; these overload the Tensor
// versions defined in an earlier section of this file.
namespace sparsecsrcpu {
at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
return wrapper_SparseCsrCPU_Scalar_mul(self, other);
}
at::Tensor & mul_(at::Tensor & self, const at::Scalar & other) {
return wrapper_SparseCsrCPU_Scalar_mul_(self, other);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::is_pinned for SparseCsrCPU: queries pinned-memory status for a
// sparse compressed tensor; `device` optionally names the pin device.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Returns bool rather than a Tensor — pure query, no mutation.
bool wrapper_SparseCsrCPU__is_pinned(const at::Tensor & self, ::std::optional<at::Device> device) {
    // No device check
  // DeviceGuard omitted
  return at::native::is_pinned_sparse_compressed(self, device);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("is_pinned",
TORCH_FN(wrapper_SparseCsrCPU__is_pinned));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point.
namespace sparsecsrcpu {
bool is_pinned(const at::Tensor & self, ::std::optional<at::Device> device) {
return wrapper_SparseCsrCPU__is_pinned(self, device);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::_pin_memory for SparseCsrCPU: produces a pinned-memory copy of a
// sparse compressed tensor via the shared compressed-format kernel.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Returns a new tensor; `device` optionally selects the pin device.
at::Tensor wrapper_SparseCsrCPU___pin_memory(const at::Tensor & self, ::std::optional<at::Device> device) {
    // No device check
  // DeviceGuard omitted
  return at::native::_pin_memory_sparse_compressed(self, device);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("_pin_memory",
TORCH_FN(wrapper_SparseCsrCPU___pin_memory));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point.
namespace sparsecsrcpu {
at::Tensor _pin_memory(const at::Tensor & self, ::std::optional<at::Device> device) {
return wrapper_SparseCsrCPU___pin_memory(self, device);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::rad2deg for SparseCsrCPU: wrappers, registration, and direct
// entry points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__rad2deg(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_rad2deg_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__rad2deg_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::rad2deg_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("rad2deg",
TORCH_FN(wrapper_SparseCsrCPU__rad2deg));
m.impl("rad2deg.out",
TORCH_FN(wrapper_SparseCsrCPU_out_rad2deg_out));
m.impl("rad2deg_",
TORCH_FN(wrapper_SparseCsrCPU__rad2deg_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor rad2deg(const at::Tensor & self) {
return wrapper_SparseCsrCPU__rad2deg(self);
}
at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_rad2deg_out(self, out);
}
at::Tensor & rad2deg_(at::Tensor & self) {
return wrapper_SparseCsrCPU__rad2deg_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::deg2rad for SparseCsrCPU: wrappers, registration, and direct
// entry points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__deg2rad(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_deg2rad_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__deg2rad_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::deg2rad_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("deg2rad",
TORCH_FN(wrapper_SparseCsrCPU__deg2rad));
m.impl("deg2rad.out",
TORCH_FN(wrapper_SparseCsrCPU_out_deg2rad_out));
m.impl("deg2rad_",
TORCH_FN(wrapper_SparseCsrCPU__deg2rad_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor deg2rad(const at::Tensor & self) {
return wrapper_SparseCsrCPU__deg2rad(self);
}
at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_deg2rad_out(self, out);
}
at::Tensor & deg2rad_(at::Tensor & self) {
return wrapper_SparseCsrCPU__deg2rad_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::neg for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__neg(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_neg_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__neg_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::neg_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("neg",
TORCH_FN(wrapper_SparseCsrCPU__neg));
m.impl("neg.out",
TORCH_FN(wrapper_SparseCsrCPU_out_neg_out));
m.impl("neg_",
TORCH_FN(wrapper_SparseCsrCPU__neg_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor neg(const at::Tensor & self) {
return wrapper_SparseCsrCPU__neg(self);
}
at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_neg_out(self, out);
}
at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_neg_out(self, out);
}
at::Tensor & neg_(at::Tensor & self) {
return wrapper_SparseCsrCPU__neg_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::round for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__round(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_round_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__round_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::round_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("round",
TORCH_FN(wrapper_SparseCsrCPU__round));
m.impl("round.out",
TORCH_FN(wrapper_SparseCsrCPU_out_round_out));
m.impl("round_",
TORCH_FN(wrapper_SparseCsrCPU__round_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor round(const at::Tensor & self) {
return wrapper_SparseCsrCPU__round(self);
}
at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_round_out(self, out);
}
at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_round_out(self, out);
}
at::Tensor & round_(at::Tensor & self) {
return wrapper_SparseCsrCPU__round_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::relu for SparseCsrCPU: functional and in-place variants (no out=
// overload is generated here).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__relu(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::relu_sparse_csr(self);
}
} // anonymous namespace
namespace {
// In-place variant: mutates and returns `self`.
at::Tensor & wrapper_SparseCsrCPU__relu_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::relu_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("relu",
TORCH_FN(wrapper_SparseCsrCPU__relu));
m.impl("relu_",
TORCH_FN(wrapper_SparseCsrCPU__relu_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry points.
namespace sparsecsrcpu {
at::Tensor relu(const at::Tensor & self) {
return wrapper_SparseCsrCPU__relu(self);
}
at::Tensor & relu_(at::Tensor & self) {
return wrapper_SparseCsrCPU__relu_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::select.int for SparseCsrCPU. The wrapper takes a c10::SymInt
// index and concretizes it with guard_int before calling the kernel.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// guard_int(__FILE__, __LINE__) converts the SymInt to a concrete
// int64_t, recording the call site for symbolic-shape diagnostics.
at::Tensor wrapper_SparseCsrCPU_int_select(const at::Tensor & self, int64_t dim, c10::SymInt index) {
    // No device check
  // DeviceGuard omitted
  return at::native::select_sparse_csr(self, dim, index.guard_int(__FILE__, __LINE__));
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("select.int",
TORCH_FN(wrapper_SparseCsrCPU_int_select));
}
} // anonymous namespace
// Direct entry points: `select` takes a concrete int64_t index (implicitly
// converted to SymInt); `select_symint` forwards a SymInt unchanged.
namespace sparsecsrcpu {
at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
return wrapper_SparseCsrCPU_int_select(self, dim, index);
}
at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
return wrapper_SparseCsrCPU_int_select(self, dim, index);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::sin for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__sin(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_sin_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__sin_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sin_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("sin",
TORCH_FN(wrapper_SparseCsrCPU__sin));
m.impl("sin.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sin_out));
m.impl("sin_",
TORCH_FN(wrapper_SparseCsrCPU__sin_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor sin(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sin(self);
}
at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sin_out(self, out);
}
at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sin_out(self, out);
}
at::Tensor & sin_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sin_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::sinh for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__sinh(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_sinh_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__sinh_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sinh_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("sinh",
TORCH_FN(wrapper_SparseCsrCPU__sinh));
m.impl("sinh.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sinh_out));
m.impl("sinh_",
TORCH_FN(wrapper_SparseCsrCPU__sinh_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor sinh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sinh(self);
}
at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sinh_out(self, out);
}
at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sinh_out(self, out);
}
at::Tensor & sinh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sinh_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::sum (full reduction) for SparseCsrCPU; optional `dtype` selects
// the accumulation/output dtype.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Reduces over all elements via the CSR-specific kernel.
at::Tensor wrapper_SparseCsrCPU__sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::sum_csr(self, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("sum",
TORCH_FN(wrapper_SparseCsrCPU__sum));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) entry point.
namespace sparsecsrcpu {
at::Tensor sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU__sum(self, dtype);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::sum.dim_IntList for SparseCsrCPU: dimension-wise reduction over
// sparse compressed tensors, with keepdim and optional dtype.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// `dim` is optional: an empty/absent list is handled by the kernel.
at::Tensor wrapper_SparseCsrCPU_dim_IntList_sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // No device check
  // DeviceGuard omitted
  return at::native::sum_sparse_compressed(self, dim, keepdim, dtype);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("sum.dim_IntList",
TORCH_FN(wrapper_SparseCsrCPU_dim_IntList_sum));
}
} // anonymous namespace
// Direct entry point; overloads the full-reduction `sum` defined in an
// earlier section of this file.
namespace sparsecsrcpu {
at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
return wrapper_SparseCsrCPU_dim_IntList_sum(self, dim, keepdim, dtype);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::sqrt for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__sqrt(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_sqrt_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__sqrt_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::sqrt_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("sqrt",
TORCH_FN(wrapper_SparseCsrCPU__sqrt));
m.impl("sqrt.out",
TORCH_FN(wrapper_SparseCsrCPU_out_sqrt_out));
m.impl("sqrt_",
TORCH_FN(wrapper_SparseCsrCPU__sqrt_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor sqrt(const at::Tensor & self) {
return wrapper_SparseCsrCPU__sqrt(self);
}
at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_sqrt_out(self, out);
}
at::Tensor & sqrt_(at::Tensor & self) {
return wrapper_SparseCsrCPU__sqrt_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::tan for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__tan(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_tan_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__tan_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tan_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("tan",
TORCH_FN(wrapper_SparseCsrCPU__tan));
m.impl("tan.out",
TORCH_FN(wrapper_SparseCsrCPU_out_tan_out));
m.impl("tan_",
TORCH_FN(wrapper_SparseCsrCPU__tan_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor tan(const at::Tensor & self) {
return wrapper_SparseCsrCPU__tan(self);
}
at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_tan_out(self, out);
}
at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_tan_out(self, out);
}
at::Tensor & tan_(at::Tensor & self) {
return wrapper_SparseCsrCPU__tan_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::tanh for SparseCsrCPU: wrappers, registration, and direct entry
// points (standard generated unary-op pattern).
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant.
at::Tensor wrapper_SparseCsrCPU__tanh(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr(self);
}
} // anonymous namespace
namespace {
// out= variant.
at::Tensor & wrapper_SparseCsrCPU_out_tanh_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
// In-place variant.
at::Tensor & wrapper_SparseCsrCPU__tanh_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::tanh_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("tanh",
TORCH_FN(wrapper_SparseCsrCPU__tanh));
m.impl("tanh.out",
TORCH_FN(wrapper_SparseCsrCPU_out_tanh_out));
m.impl("tanh_",
TORCH_FN(wrapper_SparseCsrCPU__tanh_));
}
} // anonymous namespace
// Direct (dispatcher-bypassing) calls; *_outf is schema order (`out` last).
namespace sparsecsrcpu {
at::Tensor tanh(const at::Tensor & self) {
return wrapper_SparseCsrCPU__tanh(self);
}
at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_tanh_out(self, out);
}
at::Tensor & tanh_(at::Tensor & self) {
return wrapper_SparseCsrCPU__tanh_(self);
}
} // namespace sparsecsrcpu
} // namespace at
// aten::threshold_backward for SparseCsrCPU: gradient of threshold for
// sparse compressed tensors; functional and grad_input (out=) variants.
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
// Functional variant: returns a new gradient tensor.
at::Tensor wrapper_SparseCsrCPU__threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
    // No device check
  // DeviceGuard omitted
  return at::native::threshold_backward_sparse_compressed(grad_output, self, threshold);
}
} // anonymous namespace
namespace {
// out= variant: writes into `grad_input` and returns it.
at::Tensor & wrapper_SparseCsrCPU_grad_input_threshold_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
    // No device check
  // DeviceGuard omitted
  return at::native::threshold_backward_sparse_compressed_out(grad_output, self, threshold, grad_input);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("threshold_backward",
TORCH_FN(wrapper_SparseCsrCPU__threshold_backward));
m.impl("threshold_backward.grad_input",
TORCH_FN(wrapper_SparseCsrCPU_grad_input_threshold_backward_out));
}
} // anonymous namespace
// Direct calls; *_out takes `grad_input` first, *_outf is schema order
// with `grad_input` last.
namespace sparsecsrcpu {
at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCPU__threshold_backward(grad_output, self, threshold);
}
at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
return wrapper_SparseCsrCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
return wrapper_SparseCsrCPU_grad_input_threshold_backward_out(grad_output, self, threshold, grad_input);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {
namespace {
at::Tensor wrapper_SparseCsrCPU__trunc(const at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_csr(self);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU_out_trunc_out(const at::Tensor & self, at::Tensor & out) {
    // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_csr_out(self, out);
}
} // anonymous namespace
namespace {
at::Tensor & wrapper_SparseCsrCPU__trunc_(at::Tensor & self) {
    // No device check
  // DeviceGuard omitted
  return at::native::trunc_sparse_csr_(self);
}
} // anonymous namespace
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
    m.impl("trunc",
TORCH_FN(wrapper_SparseCsrCPU__trunc));
m.impl("trunc.out",
TORCH_FN(wrapper_SparseCsrCPU_out_trunc_out));
m.impl("trunc_",
TORCH_FN(wrapper_SparseCsrCPU__trunc_));
}
} // anonymous namespace
namespace sparsecsrcpu {
at::Tensor trunc(const at::Tensor & self) {
return wrapper_SparseCsrCPU__trunc(self);
}
at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
return wrapper_SparseCsrCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
return wrapper_SparseCsrCPU_out_trunc_out(self, out);
}
at::Tensor & trunc_(at::Tensor & self) {
return wrapper_SparseCsrCPU__trunc_(self);
}
} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_sparse_csr_sum.dim_dtype.
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum(const at::Tensor& self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  return at::native::_sparse_csr_sum_cpu(self, dim, keepdim, dtype);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_sparse_csr_sum.dim_dtype", TORCH_FN(wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _sparse_csr_sum(const at::Tensor& self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  return wrapper_SparseCsrCPU_dim_dtype__sparse_csr_sum(self, dim, keepdim, dtype);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_sparse_csr_prod.dim_dtype.
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod(const at::Tensor& self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  return at::native::_sparse_csr_prod_cpu(self, dim, keepdim, dtype);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_sparse_csr_prod.dim_dtype", TORCH_FN(wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _sparse_csr_prod(const at::Tensor& self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
  return wrapper_SparseCsrCPU_dim_dtype__sparse_csr_prod(self, dim, keepdim, dtype);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::clone. No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__clone(const at::Tensor& self, ::std::optional<at::MemoryFormat> memory_format) {
  return at::native::clone_sparse_compressed(self, memory_format);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("clone", TORCH_FN(wrapper_SparseCsrCPU__clone));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor clone(const at::Tensor& self, ::std::optional<at::MemoryFormat> memory_format) {
  return wrapper_SparseCsrCPU__clone(self, memory_format);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::resize_as_sparse_ (in-place, returns self).
// No device check and no DeviceGuard.
const at::Tensor& wrapper_SparseCsrCPU__resize_as_sparse_(const at::Tensor& self, const at::Tensor& the_template) {
  return at::native::resize_as_sparse_compressed_(self, the_template);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("resize_as_sparse_", TORCH_FN(wrapper_SparseCsrCPU__resize_as_sparse_));
}

} // anonymous namespace

namespace sparsecsrcpu {

const at::Tensor& resize_as_sparse_(const at::Tensor& self, const at::Tensor& the_template) {
  return wrapper_SparseCsrCPU__resize_as_sparse_(self, the_template);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::zero_ (in-place).
// No device check and no DeviceGuard.
at::Tensor& wrapper_SparseCsrCPU__zero_(at::Tensor& self) {
  return at::native::zero_sparse_csr_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("zero_", TORCH_FN(wrapper_SparseCsrCPU__zero_));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor& zero_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__zero_(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernels for aten::sparse_sampled_addmm (functional and out).
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__sparse_sampled_addmm(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha) {
  return at::native::sparse_sampled_addmm_sparse_csr_cpu(self, mat1, mat2, beta, alpha);
}

at::Tensor& wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha, at::Tensor& out) {
  return at::native::sparse_sampled_addmm_out_sparse_csr_cpu(self, mat1, mat2, beta, alpha, out);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("sparse_sampled_addmm", TORCH_FN(wrapper_SparseCsrCPU__sparse_sampled_addmm));
  m.impl("sparse_sampled_addmm.out", TORCH_FN(wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor sparse_sampled_addmm(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU__sparse_sampled_addmm(self, mat1, mat2, beta, alpha);
}

// out-first argument order ("out" convention).
at::Tensor& sparse_sampled_addmm_out(at::Tensor& out, const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}

// out-last argument order ("outf" convention).
at::Tensor& sparse_sampled_addmm_outf(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_sparse_sampled_addmm_out(self, mat1, mat2, beta, alpha, out);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_sparse_mm_reduce_impl.
// No device check and no DeviceGuard.
::std::tuple<at::Tensor,at::Tensor> wrapper_SparseCsrCPU___sparse_mm_reduce_impl(const at::Tensor& self, const at::Tensor& other, c10::string_view reduce) {
  return at::native::_sparse_mm_reduce_impl_sparse_csr_cpu(self, other, reduce);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_sparse_mm_reduce_impl", TORCH_FN(wrapper_SparseCsrCPU___sparse_mm_reduce_impl));
}

} // anonymous namespace

namespace sparsecsrcpu {

::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl(const at::Tensor& self, const at::Tensor& other, c10::string_view reduce) {
  return wrapper_SparseCsrCPU___sparse_mm_reduce_impl(self, other, reduce);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_sparse_mm_reduce_impl_backward.
// No device check and no DeviceGuard.
::std::tuple<at::Tensor,at::Tensor> wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward(const at::Tensor& self, const at::Tensor& grad_out, const at::Tensor& weight, c10::string_view reduce, const at::Tensor& arg_out, ::std::array<bool,2> output_mask) {
  return at::native::_sparse_mm_reduce_impl_backward_sparse_csr_cpu(self, grad_out, weight, reduce, arg_out, output_mask);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_sparse_mm_reduce_impl_backward", TORCH_FN(wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward));
}

} // anonymous namespace

namespace sparsecsrcpu {

::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward(const at::Tensor& self, const at::Tensor& grad_out, const at::Tensor& weight, c10::string_view reduce, const at::Tensor& arg_out, ::std::array<bool,2> output_mask) {
  return wrapper_SparseCsrCPU___sparse_mm_reduce_impl_backward(self, grad_out, weight, reduce, arg_out, output_mask);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernels for aten::addmm (functional and out).
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__addmm(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha) {
  return at::native::addmm_sparse_compressed_dense(self, mat1, mat2, beta, alpha);
}

at::Tensor& wrapper_SparseCsrCPU_out_addmm_out(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha, at::Tensor& out) {
  return at::native::addmm_out_sparse_compressed_cpu(self, mat1, mat2, beta, alpha, out);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("addmm", TORCH_FN(wrapper_SparseCsrCPU__addmm));
  m.impl("addmm.out", TORCH_FN(wrapper_SparseCsrCPU_out_addmm_out));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor addmm(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU__addmm(self, mat1, mat2, beta, alpha);
}

// out-first argument order ("out" convention).
at::Tensor& addmm_out(at::Tensor& out, const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha) {
  return wrapper_SparseCsrCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}

// out-last argument order ("outf" convention).
at::Tensor& addmm_outf(const at::Tensor& self, const at::Tensor& mat1, const at::Tensor& mat2, const at::Scalar& beta, const at::Scalar& alpha, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_addmm_out(self, mat1, mat2, beta, alpha, out);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::sparse_mask. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__sparse_mask(const at::Tensor& self, const at::Tensor& mask) {
  return at::native::sparse_mask_sparse_compressed(self, mask);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("sparse_mask", TORCH_FN(wrapper_SparseCsrCPU__sparse_mask));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor sparse_mask(const at::Tensor& self, const at::Tensor& mask) {
  return wrapper_SparseCsrCPU__sparse_mask(self, mask);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_dense. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU___to_dense(const at::Tensor& self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
  return at::native::sparse_compressed_to_dense(self, dtype, masked_grad);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_dense", TORCH_FN(wrapper_SparseCsrCPU___to_dense));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_dense(const at::Tensor& self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
  return wrapper_SparseCsrCPU___to_dense(self, dtype, masked_grad);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::sparse_dim. No device check, no DeviceGuard.
int64_t wrapper_SparseCsrCPU__sparse_dim(const at::Tensor& self) {
  return at::native::sparse_dim_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("sparse_dim", TORCH_FN(wrapper_SparseCsrCPU__sparse_dim));
}

} // anonymous namespace

namespace sparsecsrcpu {

int64_t sparse_dim(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__sparse_dim(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::dense_dim. No device check, no DeviceGuard.
int64_t wrapper_SparseCsrCPU__dense_dim(const at::Tensor& self) {
  return at::native::dense_dim_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("dense_dim", TORCH_FN(wrapper_SparseCsrCPU__dense_dim));
}

} // anonymous namespace

namespace sparsecsrcpu {

int64_t dense_dim(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__dense_dim(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_nnz. No device check, no DeviceGuard.
int64_t wrapper_SparseCsrCPU___nnz(const at::Tensor& self) {
  return at::native::_nnz_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_nnz", TORCH_FN(wrapper_SparseCsrCPU___nnz));
}

} // anonymous namespace

namespace sparsecsrcpu {

int64_t _nnz(const at::Tensor& self) {
  return wrapper_SparseCsrCPU___nnz(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::values. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__values(const at::Tensor& self) {
  return at::native::values_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("values", TORCH_FN(wrapper_SparseCsrCPU__values));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor values(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__values(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::crow_indices. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__crow_indices(const at::Tensor& self) {
  return at::native::crow_indices_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("crow_indices", TORCH_FN(wrapper_SparseCsrCPU__crow_indices));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor crow_indices(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__crow_indices(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::col_indices. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__col_indices(const at::Tensor& self) {
  return at::native::col_indices_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("col_indices", TORCH_FN(wrapper_SparseCsrCPU__col_indices));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor col_indices(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__col_indices(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::ccol_indices. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__ccol_indices(const at::Tensor& self) {
  return at::native::ccol_indices_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("ccol_indices", TORCH_FN(wrapper_SparseCsrCPU__ccol_indices));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor ccol_indices(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__ccol_indices(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::row_indices. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__row_indices(const at::Tensor& self) {
  return at::native::row_indices_sparse_csr(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("row_indices", TORCH_FN(wrapper_SparseCsrCPU__row_indices));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor row_indices(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__row_indices(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_sparse.sparse_dim.
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU_sparse_dim__to_sparse(const at::Tensor& self, int64_t sparse_dim) {
  return at::native::sparse_compressed_to_sparse(self, sparse_dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_sparse.sparse_dim", TORCH_FN(wrapper_SparseCsrCPU_sparse_dim__to_sparse));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_sparse(const at::Tensor& self, int64_t sparse_dim) {
  return wrapper_SparseCsrCPU_sparse_dim__to_sparse(self, sparse_dim);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_sparse (layout/blocksize/dense_dim
// overload). No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU___to_sparse(const at::Tensor& self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  return at::native::sparse_compressed_to_sparse(self, layout, blocksize, dense_dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_sparse", TORCH_FN(wrapper_SparseCsrCPU___to_sparse));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_sparse(const at::Tensor& self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  return wrapper_SparseCsrCPU___to_sparse(self, layout, blocksize, dense_dim);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_sparse_csr. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU___to_sparse_csr(const at::Tensor& self, ::std::optional<int64_t> dense_dim) {
  return at::native::sparse_compressed_to_sparse_csr(self, dense_dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_sparse_csr", TORCH_FN(wrapper_SparseCsrCPU___to_sparse_csr));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_sparse_csr(const at::Tensor& self, ::std::optional<int64_t> dense_dim) {
  return wrapper_SparseCsrCPU___to_sparse_csr(self, dense_dim);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_sparse_csc. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU___to_sparse_csc(const at::Tensor& self, ::std::optional<int64_t> dense_dim) {
  return at::native::sparse_compressed_to_sparse_csc(self, dense_dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_sparse_csc", TORCH_FN(wrapper_SparseCsrCPU___to_sparse_csc));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_sparse_csc(const at::Tensor& self, ::std::optional<int64_t> dense_dim) {
  return wrapper_SparseCsrCPU___to_sparse_csc(self, dense_dim);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_sparse_bsr. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU___to_sparse_bsr(const at::Tensor& self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  return at::native::sparse_compressed_to_sparse_bsr(self, blocksize, dense_dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_sparse_bsr", TORCH_FN(wrapper_SparseCsrCPU___to_sparse_bsr));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_sparse_bsr(const at::Tensor& self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  return wrapper_SparseCsrCPU___to_sparse_bsr(self, blocksize, dense_dim);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::_to_sparse_bsc. No device check, no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU___to_sparse_bsc(const at::Tensor& self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  return at::native::sparse_compressed_to_sparse_bsc(self, blocksize, dense_dim);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("_to_sparse_bsc", TORCH_FN(wrapper_SparseCsrCPU___to_sparse_bsc));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor _to_sparse_bsc(const at::Tensor& self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
  return wrapper_SparseCsrCPU___to_sparse_bsc(self, blocksize, dense_dim);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::triangular_solve.X: writes the solution into
// X and the (clone of) A into M, returning both. No device check, no DeviceGuard.
::std::tuple<at::Tensor&,at::Tensor&> wrapper_SparseCsrCPU_X_triangular_solve_out(const at::Tensor& self, const at::Tensor& A, bool upper, bool transpose, bool unitriangular, at::Tensor& X, at::Tensor& M) {
  return at::native::triangular_solve_out_sparse_csr_cpu(self, A, upper, transpose, unitriangular, X, M);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("triangular_solve.X", TORCH_FN(wrapper_SparseCsrCPU_X_triangular_solve_out));
}

} // anonymous namespace

namespace sparsecsrcpu {

// out-first argument order ("out" convention).
::std::tuple<at::Tensor&,at::Tensor&> triangular_solve_out(at::Tensor& X, at::Tensor& M, const at::Tensor& self, const at::Tensor& A, bool upper, bool transpose, bool unitriangular) {
  return wrapper_SparseCsrCPU_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}

// out-last argument order ("outf" convention).
::std::tuple<at::Tensor&,at::Tensor&> triangular_solve_outf(const at::Tensor& self, const at::Tensor& A, bool upper, bool transpose, bool unitriangular, at::Tensor& X, at::Tensor& M) {
  return wrapper_SparseCsrCPU_X_triangular_solve_out(self, A, upper, transpose, unitriangular, X, M);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernels for aten::erfinv (functional, out, in-place).
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__erfinv(const at::Tensor& self) {
  return at::native::erfinv_sparse_csr(self);
}

at::Tensor& wrapper_SparseCsrCPU_out_erfinv_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::erfinv_sparse_csr_out(self, out);
}

at::Tensor& wrapper_SparseCsrCPU__erfinv_(at::Tensor& self) {
  return at::native::erfinv_sparse_csr_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("erfinv", TORCH_FN(wrapper_SparseCsrCPU__erfinv));
  m.impl("erfinv.out", TORCH_FN(wrapper_SparseCsrCPU_out_erfinv_out));
  m.impl("erfinv_", TORCH_FN(wrapper_SparseCsrCPU__erfinv_));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor erfinv(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__erfinv(self);
}

at::Tensor& erfinv_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_erfinv_out(self, out);
}

at::Tensor& erfinv_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_erfinv_out(self, out);
}

at::Tensor& erfinv_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__erfinv_(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernels for aten::sign (functional, out, in-place).
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__sign(const at::Tensor& self) {
  return at::native::sign_sparse_csr(self);
}

at::Tensor& wrapper_SparseCsrCPU_out_sign_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::sign_sparse_csr_out(self, out);
}

at::Tensor& wrapper_SparseCsrCPU__sign_(at::Tensor& self) {
  return at::native::sign_sparse_csr_(self);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("sign", TORCH_FN(wrapper_SparseCsrCPU__sign));
  m.impl("sign.out", TORCH_FN(wrapper_SparseCsrCPU_out_sign_out));
  m.impl("sign_", TORCH_FN(wrapper_SparseCsrCPU__sign_));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor sign(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__sign(self);
}

at::Tensor& sign_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_sign_out(self, out);
}

at::Tensor& sign_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_sign_out(self, out);
}

at::Tensor& sign_(at::Tensor& self) {
  return wrapper_SparseCsrCPU__sign_(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernels for aten::signbit (functional and out).
// No device check and no DeviceGuard.
at::Tensor wrapper_SparseCsrCPU__signbit(const at::Tensor& self) {
  return at::native::signbit_sparse_csr(self);
}

at::Tensor& wrapper_SparseCsrCPU_out_signbit_out(const at::Tensor& self, at::Tensor& out) {
  return at::native::signbit_sparse_csr_out(self, out);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("signbit", TORCH_FN(wrapper_SparseCsrCPU__signbit));
  m.impl("signbit.out", TORCH_FN(wrapper_SparseCsrCPU_out_signbit_out));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor signbit(const at::Tensor& self) {
  return wrapper_SparseCsrCPU__signbit(self);
}

at::Tensor& signbit_out(at::Tensor& out, const at::Tensor& self) {
  return wrapper_SparseCsrCPU_out_signbit_out(self, out);
}

at::Tensor& signbit_outf(const at::Tensor& self, at::Tensor& out) {
  return wrapper_SparseCsrCPU_out_signbit_out(self, out);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::normal_ (in-place).
// No device check and no DeviceGuard.
at::Tensor& wrapper_SparseCsrCPU__normal_(at::Tensor& self, double mean, double std, ::std::optional<at::Generator> generator) {
  return at::native::normal_sparse_csr_(self, mean, std, generator);
}

TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("normal_", TORCH_FN(wrapper_SparseCsrCPU__normal_));
}

} // anonymous namespace

namespace sparsecsrcpu {

at::Tensor& normal_(at::Tensor& self, double mean, double std, ::std::optional<at::Generator> generator) {
  return wrapper_SparseCsrCPU__normal_(self, mean, std, generator);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::isinf.
// No device check; DeviceGuard omitted.
at::Tensor wrapper_SparseCsrCPU__isinf(const at::Tensor & self) {
  return at::native::isinf_sparse_csr(self);
}

// Register under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("isinf", TORCH_FN(wrapper_SparseCsrCPU__isinf));
}

} // anonymous namespace

// Backend-namespaced entry point that bypasses the dispatcher.
namespace sparsecsrcpu {

at::Tensor isinf(const at::Tensor & self) {
  return wrapper_SparseCsrCPU__isinf(self);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::isposinf (functional variant).
// No device check; DeviceGuard omitted.
at::Tensor wrapper_SparseCsrCPU__isposinf(const at::Tensor & self) {
  return at::native::isposinf_sparse_csr(self);
}

// SparseCsrCPU kernel for aten::isposinf.out (out variant).
// No device check; DeviceGuard omitted.
at::Tensor & wrapper_SparseCsrCPU_out_isposinf_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::isposinf_sparse_csr_out(self, out);
}

// Register both variants under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("isposinf", TORCH_FN(wrapper_SparseCsrCPU__isposinf));
  m.impl("isposinf.out", TORCH_FN(wrapper_SparseCsrCPU_out_isposinf_out));
}

} // anonymous namespace

// Backend-namespaced entry points that bypass the dispatcher.
namespace sparsecsrcpu {

at::Tensor isposinf(const at::Tensor & self) {
  return wrapper_SparseCsrCPU__isposinf(self);
}

// out variant, (out, self) argument order.
at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCsrCPU_out_isposinf_out(self, out);
}

// out variant, (self, out) argument order ("outf" convention).
at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCsrCPU_out_isposinf_out(self, out);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::isneginf (functional variant).
// No device check; DeviceGuard omitted.
at::Tensor wrapper_SparseCsrCPU__isneginf(const at::Tensor & self) {
  return at::native::isneginf_sparse_csr(self);
}

// SparseCsrCPU kernel for aten::isneginf.out (out variant).
// No device check; DeviceGuard omitted.
at::Tensor & wrapper_SparseCsrCPU_out_isneginf_out(const at::Tensor & self, at::Tensor & out) {
  return at::native::isneginf_sparse_csr_out(self, out);
}

// Register both variants under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("isneginf", TORCH_FN(wrapper_SparseCsrCPU__isneginf));
  m.impl("isneginf.out", TORCH_FN(wrapper_SparseCsrCPU_out_isneginf_out));
}

} // anonymous namespace

// Backend-namespaced entry points that bypass the dispatcher.
namespace sparsecsrcpu {

at::Tensor isneginf(const at::Tensor & self) {
  return wrapper_SparseCsrCPU__isneginf(self);
}

// out variant, (out, self) argument order.
at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
  return wrapper_SparseCsrCPU_out_isneginf_out(self, out);
}

// out variant, (self, out) argument order ("outf" convention).
at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
  return wrapper_SparseCsrCPU_out_isneginf_out(self, out);
}

} // namespace sparsecsrcpu
} // namespace at
namespace at {
// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

// SparseCsrCPU kernel for aten::select_copy.int.
// No device check; DeviceGuard omitted.
// guard_int(__FILE__, __LINE__) produces the concrete integer index expected
// by the native kernel, tagging the guard with this call site.
at::Tensor wrapper_SparseCsrCPU_int_select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  return at::native::select_copy_sparse_csr(self, dim, index.guard_int(__FILE__, __LINE__));
}

// Register under the SparseCsrCPU dispatch key.
TORCH_LIBRARY_IMPL(aten, SparseCsrCPU, m) {
  m.impl("select_copy.int", TORCH_FN(wrapper_SparseCsrCPU_int_select_copy));
}

} // anonymous namespace

// Backend-namespaced entry points that bypass the dispatcher.
namespace sparsecsrcpu {

// Concrete-int overload; the int64_t index converts implicitly to SymInt.
at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
  return wrapper_SparseCsrCPU_int_select_copy(self, dim, index);
}

// Symbolic-int overload.
at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  return wrapper_SparseCsrCPU_int_select_copy(self, dim, index);
}

} // namespace sparsecsrcpu
} // namespace at
