#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>

// @generated by torchgen/gen.py from Operators.cpp
// NOTE See [Sharded File] comment in VariableType

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/_cast_Short.h>
#include <ATen/ops/output_nr.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta.h>
#include <ATen/ops/_assert_scalar.h>
#include <ATen/ops/_print.h>
#include <ATen/ops/_functional_sym_constrain_range_for_size.h>
#include <ATen/ops/_make_dep_token.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/native_dropout.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/angle.h>
#include <ATen/ops/angle.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/atleast_2d.h>
#include <ATen/ops/atleast_2d.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/batch_norm.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bilinear.h>
#include <ATen/ops/binary_cross_entropy_with_logits.h>
#include <ATen/ops/bincount.h>
#include <ATen/ops/_lazy_clone.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/block_diag.h>
#include <ATen/ops/unsafe_chunk.h>
#include <ATen/ops/chunk.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/cudnn_is_acceptable.h>
#include <ATen/ops/complex.h>
#include <ATen/ops/complex.h>
#include <ATen/ops/polar.h>
#include <ATen/ops/polar.h>
#include <ATen/ops/conv_transpose2d.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/cov.h>
#include <ATen/ops/cudnn_convolution_add_relu.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/_cummax_helper.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/diagonal_backward.h>
#include <ATen/ops/diff.h>
#include <ATen/ops/diff.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/dot.h>
#include <ATen/ops/dot.h>
#include <ATen/ops/einsum.h>
#include <ATen/ops/embedding_renorm.h>
#include <ATen/ops/embedding_sparse_backward.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/new_empty_strided.h>
#include <ATen/ops/new_full.h>
#include <ATen/ops/new_ones.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/empty_quantized.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/from_file.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/_cufft_clear_plan_cache.h>
#include <ATen/ops/_unsafe_masked_index.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/is_conj.h>
#include <ATen/ops/_is_zerotensor.h>
#include <ATen/ops/is_nonzero.h>
#include <ATen/ops/is_signed.h>
#include <ATen/ops/layer_norm.h>
#include <ATen/ops/native_layer_norm_backward.h>
#include <ATen/ops/rms_norm.h>
#include <ATen/ops/fbgemm_linear_fp16_weight.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/logaddexp.h>
#include <ATen/ops/logaddexp.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/matrix_power.h>
#include <ATen/ops/matrix_power.h>
#include <ATen/ops/mkldnn_max_pool3d.h>
#include <ATen/ops/quantized_max_pool3d.h>
#include <ATen/ops/mps_convolution_backward.h>
#include <ATen/ops/mkldnn_rnn_layer.h>
#include <ATen/ops/miopen_convolution.h>
#include <ATen/ops/miopen_rnn.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu.h>
#include <ATen/ops/_sparse_mm.h>
#include <ATen/ops/_sparse_mm.h>
#include <ATen/ops/_sparse_sparse_matmul.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/batch_norm_update_stats.h>
#include <ATen/ops/_nnpack_available.h>
#include <ATen/ops/ones_like.h>
#include <ATen/ops/_euclidean_dist.h>
#include <ATen/ops/_cdist_backward.h>
#include <ATen/ops/_pdist_forward.h>
#include <ATen/ops/native_channel_shuffle.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randn_like.h>
#include <ATen/ops/repeat.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/_mkldnn_reshape.h>
#include <ATen/ops/_prelu_kernel.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/_nested_select_backward.h>
#include <ATen/ops/sym_size.h>
#include <ATen/ops/vsplit.h>
#include <ATen/ops/vsplit.h>
#include <ATen/ops/hstack.h>
#include <ATen/ops/hstack.h>
#include <ATen/ops/istft.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/nansum.h>
#include <ATen/ops/nansum.h>
#include <ATen/ops/flipud.h>
#include <ATen/ops/rot90.h>
#include <ATen/ops/trapz.h>
#include <ATen/ops/trapz.h>
#include <ATen/ops/_nested_tensor_strides.h>
#include <ATen/ops/_nested_tensor_storage_offsets.h>
#include <ATen/ops/_nested_get_ragged_idx.h>
#include <ATen/ops/_nested_get_min_seqlen.h>
#include <ATen/ops/triplet_margin_loss.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/norm_except_dim.h>
#include <ATen/ops/_standard_gamma_grad.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/_batch_norm_with_update.h>
#include <ATen/ops/_batch_norm_with_update.h>
#include <ATen/ops/_sparse_sum_backward.h>
#include <ATen/ops/_sparse_csr_sum.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/_sparse_compressed_tensor_with_dims.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_sparse_mask_projection.h>
#include <ATen/ops/_to_dense.h>
#include <ATen/ops/is_coalesced.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/indices.h>
#include <ATen/ops/col_indices.h>
#include <ATen/ops/hspmm.h>
#include <ATen/ops/hspmm.h>
#include <ATen/ops/to_sparse_bsc.h>
#include <ATen/ops/_to_sparse_semi_structured.h>
#include <ATen/ops/quantize_per_tensor_dynamic.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
#include <ATen/ops/choose_qparams_optimized.h>
#include <ATen/ops/cartesian_prod.h>
#include <ATen/ops/promote_types.h>
#include <ATen/ops/_local_scalar_dense.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward.h>
#include <ATen/ops/rnn_relu.h>
#include <ATen/ops/rnn_relu.h>
#include <ATen/ops/gru_cell.h>
#include <ATen/ops/quantized_lstm_cell.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/masked_scatter_backward.h>
#include <ATen/ops/put.h>
#include <ATen/ops/put.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/tril_indices.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/masked_select.h>
#include <ATen/ops/masked_select.h>
#include <ATen/ops/nonzero_static.h>
#include <ATen/ops/nonzero_static.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/_cholesky_solve_helper.h>
#include <ATen/ops/cholesky_inverse.h>
#include <ATen/ops/cholesky_inverse.h>
#include <ATen/ops/_lu_with_info.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/histogramdd.h>
#include <ATen/ops/histogramdd.h>
#include <ATen/ops/histogramdd.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/fmax.h>
#include <ATen/ops/fmax.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_norm.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_rsqrt.h>
#include <ATen/ops/_foreach_rsqrt.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr.h>
#include <ATen/ops/multi_margin_loss.h>
#include <ATen/ops/multi_margin_loss.h>
#include <ATen/ops/multilabel_margin_loss.h>
#include <ATen/ops/multilabel_margin_loss.h>
#include <ATen/ops/multilabel_margin_loss_forward.h>
#include <ATen/ops/multilabel_margin_loss_forward.h>
#include <ATen/ops/nll_loss_forward.h>
#include <ATen/ops/nll_loss_forward.h>
#include <ATen/ops/soft_margin_loss_backward.h>
#include <ATen/ops/soft_margin_loss_backward.h>
#include <ATen/ops/glu_jvp.h>
#include <ATen/ops/hardswish.h>
#include <ATen/ops/hardswish.h>
#include <ATen/ops/hardswish.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/softshrink_backward.h>
#include <ATen/ops/softshrink_backward.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/avg_pool2d.h>
#include <ATen/ops/avg_pool2d.h>
#include <ATen/ops/fractional_max_pool2d_backward.h>
#include <ATen/ops/fractional_max_pool2d_backward.h>
#include <ATen/ops/max_pool3d_with_indices_backward.h>
#include <ATen/ops/max_pool3d_with_indices_backward.h>
#include <ATen/ops/reflection_pad1d_backward.h>
#include <ATen/ops/reflection_pad1d_backward.h>
#include <ATen/ops/reflection_pad2d_backward.h>
#include <ATen/ops/reflection_pad2d_backward.h>
#include <ATen/ops/replication_pad1d_backward.h>
#include <ATen/ops/replication_pad1d_backward.h>
#include <ATen/ops/replication_pad3d_backward.h>
#include <ATen/ops/replication_pad3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/upsample_bilinear2d_backward.h>
#include <ATen/ops/upsample_bilinear2d_backward.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/slow_conv_dilated3d.h>
#include <ATen/ops/isinf.h>
#include <ATen/ops/special_digamma.h>
#include <ATen/ops/special_digamma.h>
#include <ATen/ops/special_ndtr.h>
#include <ATen/ops/special_ndtr.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_round.h>
#include <ATen/ops/special_round.h>
#include <ATen/ops/fft_ifft.h>
#include <ATen/ops/fft_ifft.h>
#include <ATen/ops/fft_hfft.h>
#include <ATen/ops/fft_hfft.h>
#include <ATen/ops/fft_ihfft.h>
#include <ATen/ops/fft_ihfft.h>
#include <ATen/ops/fft_ihfft2.h>
#include <ATen/ops/fft_ihfft2.h>
#include <ATen/ops/fft_irfftn.h>
#include <ATen/ops/fft_irfftn.h>
#include <ATen/ops/fft_ifftshift.h>
#include <ATen/ops/slogdet.h>
#include <ATen/ops/slogdet.h>
#include <ATen/ops/linalg_eig.h>
#include <ATen/ops/linalg_eig.h>
#include <ATen/ops/_linalg_eigvals.h>
#include <ATen/ops/linalg_eigh.h>
#include <ATen/ops/linalg_eigh.h>
#include <ATen/ops/linalg_eigvalsh.h>
#include <ATen/ops/linalg_eigvalsh.h>
#include <ATen/ops/linalg_householder_product.h>
#include <ATen/ops/linalg_householder_product.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_svd.h>
#include <ATen/ops/linalg_svd.h>
#include <ATen/ops/_test_optional_floatlist.h>
#include <ATen/ops/unflatten_dense_tensors.h>
#include <ATen/ops/_nested_tensor_from_tensor_list.h>
#include <ATen/ops/_sparse_broadcast_to_copy.h>
#include <ATen/ops/transpose_copy.h>
#include <ATen/ops/_indices_copy.h>
#include <ATen/ops/_values_copy.h>
#include <ATen/ops/values_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/unfold_copy.h>
#include <ATen/ops/scaled_dot_product_attention.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward.h>
#include <ATen/ops/special_bessel_y1.h>
#include <ATen/ops/special_bessel_y1.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_scaled_modified_bessel_k0.h>
#include <ATen/ops/special_scaled_modified_bessel_k0.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_propagate_xla_data.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/native_dropout.h>
#include <ATen/ops/add.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/binary_cross_entropy_with_logits.h>
#include <ATen/ops/bincount.h>
#include <ATen/ops/block_diag.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/cudnn_convolution_add_relu.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/diagonal_backward.h>
#include <ATen/ops/embedding_renorm.h>
#include <ATen/ops/embedding_renorm.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/new_empty_strided.h>
#include <ATen/ops/new_full.h>
#include <ATen/ops/new_ones.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/empty_quantized.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/from_file.h>
#include <ATen/ops/native_layer_norm_backward.h>
#include <ATen/ops/mkldnn_max_pool3d.h>
#include <ATen/ops/quantized_max_pool3d.h>
#include <ATen/ops/mps_convolution_backward.h>
#include <ATen/ops/mkldnn_rnn_layer.h>
#include <ATen/ops/miopen_convolution.h>
#include <ATen/ops/miopen_rnn.h>
#include <ATen/ops/_sparse_sparse_matmul.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/batch_norm_update_stats.h>
#include <ATen/ops/ones_like.h>
#include <ATen/ops/_euclidean_dist.h>
#include <ATen/ops/_cdist_backward.h>
#include <ATen/ops/_pdist_forward.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/randn_like.h>
#include <ATen/ops/repeat.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/_mkldnn_reshape.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/rot90.h>
#include <ATen/ops/_nested_tensor_strides.h>
#include <ATen/ops/_nested_tensor_storage_offsets.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/_standard_gamma_grad.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/_batch_norm_with_update.h>
#include <ATen/ops/_sparse_sum_backward.h>
#include <ATen/ops/_sparse_csr_sum.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_sparse_mask_projection.h>
#include <ATen/ops/_to_dense.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/quantize_per_tensor_dynamic.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/put.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/tril_indices.h>
#include <ATen/ops/_cholesky_solve_helper.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_norm.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_rsqrt.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/glu_jvp.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/slow_conv_dilated3d.h>
#include <ATen/ops/isinf.h>
#include <ATen/ops/_test_optional_floatlist.h>
#include <ATen/ops/_nested_tensor_from_tensor_list.h>
#include <ATen/ops/_sparse_broadcast_to_copy.h>
#include <ATen/ops/transpose_copy.h>
#include <ATen/ops/_indices_copy.h>
#include <ATen/ops/_values_copy.h>
#include <ATen/ops/values_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/unfold_copy.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#endif



namespace at { namespace _ops {


// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
// Resolves the registered "_cast_Short" operator in the global dispatcher and
// returns a handle typed to its schema. findSchemaOrThrow throws if the
// operator was never registered. C10_NOINLINE keeps this one-time lookup from
// being inlined into every call site.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Short::schema> create__cast_Short_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cast_Short::name, _cast_Short::overload_name)
      .typed<_cast_Short::schema>();
}

// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
// Dispatches _cast_Short through the c10 dispatcher.
at::Tensor _cast_Short::call(const at::Tensor & self, bool non_blocking) {
    
    // Handle is resolved once (function-local static) and reused for all calls.
    static auto op = create__cast_Short_typed_handle();
    return op.call(self, non_blocking);
}

// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
// Re-enters dispatch for _cast_Short using the caller-supplied DispatchKeySet
// instead of recomputing keys from the arguments.
at::Tensor _cast_Short::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
    
    static auto op = create__cast_Short_typed_handle();
    return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::output_nr(Tensor self) -> int
// Resolves the registered "output_nr" operator and returns a schema-typed
// handle; throws if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<output_nr::schema> create_output_nr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(output_nr::name, output_nr::overload_name)
      .typed<output_nr::schema>();
}

// aten::output_nr(Tensor self) -> int
// Dispatches output_nr through the c10 dispatcher.
int64_t output_nr::call(const at::Tensor & self) {
    
    // Cached per-process via function-local static initialization.
    static auto op = create_output_nr_typed_handle();
    return op.call(self);
}

// aten::output_nr(Tensor self) -> int
// Re-enters dispatch with an explicitly provided DispatchKeySet.
int64_t output_nr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_output_nr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Resolves the registered "_new_zeros_with_same_feature_meta" operator and
// returns a schema-typed handle; throws if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_new_zeros_with_same_feature_meta::schema> create__new_zeros_with_same_feature_meta_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_new_zeros_with_same_feature_meta::name, _new_zeros_with_same_feature_meta::overload_name)
      .typed<_new_zeros_with_same_feature_meta::schema>();
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Dispatches _new_zeros_with_same_feature_meta through the c10 dispatcher.
at::Tensor _new_zeros_with_same_feature_meta::call(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
    
    // Handle resolved once and cached for all subsequent calls.
    static auto op = create__new_zeros_with_same_feature_meta_typed_handle();
    return op.call(self, other, self_num_batch_dims);
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Re-enters dispatch with an explicitly provided DispatchKeySet.
at::Tensor _new_zeros_with_same_feature_meta::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
    
    static auto op = create__new_zeros_with_same_feature_meta_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, self_num_batch_dims);
}

// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
// Resolves the registered "_assert_scalar" operator and returns a
// schema-typed handle; throws if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_assert_scalar::schema> create__assert_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_scalar::name, _assert_scalar::overload_name)
      .typed<_assert_scalar::schema>();
}

// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
// Dispatches _assert_scalar (returns void per the schema's `-> ()`).
void _assert_scalar::call(const at::Scalar & self, c10::string_view assert_msg) {
    
    static auto op = create__assert_scalar_typed_handle();
    return op.call(self, assert_msg);
}

// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
// Re-enters dispatch with an explicitly provided DispatchKeySet.
void _assert_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, c10::string_view assert_msg) {
    
    static auto op = create__assert_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, assert_msg);
}

// aten::_print(str s) -> ()
// Resolves the registered "_print" operator and returns a schema-typed
// handle; throws if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_print::schema> create__print_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_print::name, _print::overload_name)
      .typed<_print::schema>();
}

// aten::_print(str s) -> ()
// Dispatches _print (returns void per the schema's `-> ()`).
void _print::call(c10::string_view s) {
    
    static auto op = create__print_typed_handle();
    return op.call(s);
}

// aten::_print(str s) -> ()
// Re-enters dispatch with an explicitly provided DispatchKeySet.
void _print::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view s) {
    
    static auto op = create__print_typed_handle();
    return op.redispatch(dispatchKeySet, s);
}

// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Resolves the registered "_functional_sym_constrain_range_for_size" operator
// and returns a schema-typed handle; throws if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_functional_sym_constrain_range_for_size::schema> create__functional_sym_constrain_range_for_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_functional_sym_constrain_range_for_size::name, _functional_sym_constrain_range_for_size::overload_name)
      .typed<_functional_sym_constrain_range_for_size::schema>();
}

// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Dispatches _functional_sym_constrain_range_for_size; `int?` schema arguments
// map to ::std::optional<int64_t>.
at::Tensor _functional_sym_constrain_range_for_size::call(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
    
    static auto op = create__functional_sym_constrain_range_for_size_typed_handle();
    return op.call(size, min, max, dep_token);
}

// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Re-enters dispatch with an explicitly provided DispatchKeySet.
at::Tensor _functional_sym_constrain_range_for_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
    
    static auto op = create__functional_sym_constrain_range_for_size_typed_handle();
    return op.redispatch(dispatchKeySet, size, min, max, dep_token);
}

// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves the registered "_make_dep_token" operator and returns a
// schema-typed handle; throws if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_make_dep_token::schema> create__make_dep_token_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dep_token::name, _make_dep_token::overload_name)
      .typed<_make_dep_token::schema>();
}

// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor _make_dep_token::call(::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create__make_dep_token_typed_handle();
    return op.call(dtype, layout, device, pin_memory, memory_format);
}

// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor _make_dep_token::redispatch(c10::DispatchKeySet dispatchKeySet, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create__make_dep_token_typed_handle();
    return op.redispatch(dispatchKeySet, dtype, layout, device, pin_memory, memory_format);
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_init_dropout_state::schema> create__cudnn_init_dropout_state_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cudnn_init_dropout_state::name, _cudnn_init_dropout_state::overload_name)
      .typed<_cudnn_init_dropout_state::schema>();
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _cudnn_init_dropout_state::call(double dropout, bool train, int64_t dropout_seed, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create__cudnn_init_dropout_state_typed_handle();
    return dispatch_op.call(dropout, train, dropout_seed, dtype, layout, device, pin_memory);
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _cudnn_init_dropout_state::redispatch(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto dispatch_op = create__cudnn_init_dropout_state_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, dropout, train, dropout_seed, dtype, layout, device, pin_memory);
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout::schema> create_native_dropout_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(native_dropout::name, native_dropout::overload_name)
      .typed<native_dropout::schema>();
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> native_dropout::call(const at::Tensor & input, double p, ::std::optional<bool> train) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_native_dropout_typed_handle();
    return dispatch_op.call(input, p, train);
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> native_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional<bool> train) {
    static auto dispatch_op = create_native_dropout_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, input, p, train);
}

// aten::absolute(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<absolute::schema> create_absolute_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(absolute::name, absolute::overload_name)
      .typed<absolute::schema>();
}

// aten::absolute(Tensor self) -> Tensor
at::Tensor absolute::call(const at::Tensor & self) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_absolute_typed_handle();
    return dispatch_op.call(self);
}

// aten::absolute(Tensor self) -> Tensor
at::Tensor absolute::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto dispatch_op = create_absolute_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self);
}

// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<absolute_::schema> create_absolute__typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(absolute_::name, absolute_::overload_name)
      .typed<absolute_::schema>();
}

// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & absolute_::call(at::Tensor & self) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_absolute__typed_handle();
    return dispatch_op.call(self);
}

// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & absolute_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto dispatch_op = create_absolute__typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self);
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<absolute_out::schema> create_absolute_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(absolute_out::name, absolute_out::overload_name)
      .typed<absolute_out::schema>();
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & absolute_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_absolute_out_typed_handle();
    return dispatch_op.call(self, out);
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & absolute_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto dispatch_op = create_absolute_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, out);
}

// aten::angle(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<angle::schema> create_angle_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(angle::name, angle::overload_name)
      .typed<angle::schema>();
}

// aten::angle(Tensor self) -> Tensor
at::Tensor angle::call(const at::Tensor & self) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_angle_typed_handle();
    return dispatch_op.call(self);
}

// aten::angle(Tensor self) -> Tensor
at::Tensor angle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto dispatch_op = create_angle_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self);
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<angle_out::schema> create_angle_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(angle_out::name, angle_out::overload_name)
      .typed<angle_out::schema>();
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & angle_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_angle_out_typed_handle();
    return dispatch_op.call(self, out);
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & angle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto dispatch_op = create_angle_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, out);
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<add_Tensor::schema> create_add_Tensor_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(add_Tensor::name, add_Tensor::overload_name)
      .typed<add_Tensor::schema>();
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor add_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_add_Tensor_typed_handle();
    return dispatch_op.call(self, other, alpha);
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor add_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto dispatch_op = create_add_Tensor_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<add__Tensor::schema> create_add__Tensor_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(add__Tensor::name, add__Tensor::overload_name)
      .typed<add__Tensor::schema>();
}

// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & add__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_add__Tensor_typed_handle();
    return dispatch_op.call(self, other, alpha);
}

// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & add__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto dispatch_op = create_add__Tensor_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<add_out::schema> create_add_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(add_out::name, add_out::overload_name)
      .typed<add_out::schema>();
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & add_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_add_out_typed_handle();
    return dispatch_op.call(self, other, alpha, out);
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & add_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    static auto dispatch_op = create_add_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<add_Scalar::schema> create_add_Scalar_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(add_Scalar::name, add_Scalar::overload_name)
      .typed<add_Scalar::schema>();
}

// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor add_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_add_Scalar_typed_handle();
    return dispatch_op.call(self, other, alpha);
}

// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor add_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto dispatch_op = create_add_Scalar_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<add__Scalar::schema> create_add__Scalar_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(add__Scalar::name, add__Scalar::overload_name)
      .typed<add__Scalar::schema>();
}

// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & add__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_add__Scalar_typed_handle();
    return dispatch_op.call(self, other, alpha);
}

// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & add__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto dispatch_op = create_add__Scalar_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<all_dim::schema> create_all_dim_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(all_dim::name, all_dim::overload_name)
      .typed<all_dim::schema>();
}

// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor all_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_all_dim_typed_handle();
    return dispatch_op.call(self, dim, keepdim);
}

// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor all_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    static auto dispatch_op = create_all_dim_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<all_dims::schema> create_all_dims_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(all_dims::name, all_dims::overload_name)
      .typed<all_dims::schema>();
}

// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
at::Tensor all_dims::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_all_dims_typed_handle();
    return dispatch_op.call(self, dim, keepdim);
}

// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
at::Tensor all_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    static auto dispatch_op = create_all_dims_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<all_out::schema> create_all_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(all_out::name, all_out::overload_name)
      .typed<all_out::schema>();
}

// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_out::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_all_out_typed_handle();
    return dispatch_op.call(self, dim, keepdim, out);
}

// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    static auto dispatch_op = create_all_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<all_dims_out::schema> create_all_dims_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(all_dims_out::name, all_dims_out::overload_name)
      .typed<all_dims_out::schema>();
}

// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dims_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_all_dims_out_typed_handle();
    return dispatch_op.call(self, dim, keepdim, out);
}

// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto dispatch_op = create_all_dims_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<all_dimname::schema> create_all_dimname_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(all_dimname::name, all_dimname::overload_name)
      .typed<all_dimname::schema>();
}

// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor all_dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_all_dimname_typed_handle();
    return dispatch_op.call(self, dim, keepdim);
}

// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor all_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    static auto dispatch_op = create_all_dimname_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<all_dimname_out::schema> create_all_dimname_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(all_dimname_out::name, all_dimname_out::overload_name)
      .typed<all_dimname_out::schema>();
}

// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_all_dimname_out_typed_handle();
    return dispatch_op.call(self, dim, keepdim, out);
}

// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    static auto dispatch_op = create_all_dimname_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::asinh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<asinh::schema> create_asinh_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(asinh::name, asinh::overload_name)
      .typed<asinh::schema>();
}

// aten::asinh(Tensor self) -> Tensor
at::Tensor asinh::call(const at::Tensor & self) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_asinh_typed_handle();
    return dispatch_op.call(self);
}

// aten::asinh(Tensor self) -> Tensor
at::Tensor asinh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto dispatch_op = create_asinh_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self);
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<asinh_::schema> create_asinh__typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(asinh_::name, asinh_::overload_name)
      .typed<asinh_::schema>();
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & asinh_::call(at::Tensor & self) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_asinh__typed_handle();
    return dispatch_op.call(self);
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & asinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto dispatch_op = create_asinh__typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self);
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<asinh_out::schema> create_asinh_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(asinh_out::name, asinh_out::overload_name)
      .typed<asinh_out::schema>();
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & asinh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_asinh_out_typed_handle();
    return dispatch_op.call(self, out);
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & asinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto dispatch_op = create_asinh_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, out);
}

// aten::atleast_2d(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atleast_2d::schema> create_atleast_2d_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(atleast_2d::name, atleast_2d::overload_name)
      .typed<atleast_2d::schema>();
}

// aten::atleast_2d(Tensor self) -> Tensor
at::Tensor atleast_2d::call(const at::Tensor & self) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_atleast_2d_typed_handle();
    return dispatch_op.call(self);
}

// aten::atleast_2d(Tensor self) -> Tensor
at::Tensor atleast_2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto dispatch_op = create_atleast_2d_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self);
}

// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<atleast_2d_Sequence::schema> create_atleast_2d_Sequence_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(atleast_2d_Sequence::name, atleast_2d_Sequence::overload_name)
      .typed<atleast_2d_Sequence::schema>();
}

// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_2d_Sequence::call(at::TensorList tensors) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_atleast_2d_Sequence_typed_handle();
    return dispatch_op.call(tensors);
}

// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_2d_Sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static auto dispatch_op = create_atleast_2d_Sequence_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, tensors);
}

// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<baddbmm::schema> create_baddbmm_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(baddbmm::name, baddbmm::overload_name)
      .typed<baddbmm::schema>();
}

// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor baddbmm::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_baddbmm_typed_handle();
    return dispatch_op.call(self, batch1, batch2, beta, alpha);
}

// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor baddbmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    static auto dispatch_op = create_baddbmm_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<baddbmm_::schema> create_baddbmm__typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(baddbmm_::name, baddbmm_::overload_name)
      .typed<baddbmm_::schema>();
}

// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & baddbmm_::call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_baddbmm__typed_handle();
    return dispatch_op.call(self, batch1, batch2, beta, alpha);
}

// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & baddbmm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    static auto dispatch_op = create_baddbmm__typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<baddbmm_out::schema> create_baddbmm_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(baddbmm_out::name, baddbmm_out::overload_name)
      .typed<baddbmm_out::schema>();
}

// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & baddbmm_out::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_baddbmm_out_typed_handle();
    return dispatch_op.call(self, batch1, batch2, beta, alpha, out);
}

// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & baddbmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    static auto dispatch_op = create_baddbmm_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out);
}

// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm::schema> create_batch_norm_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm::name, batch_norm::overload_name)
      .typed<batch_norm::schema>();
}

// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor batch_norm::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_batch_norm_typed_handle();
    return dispatch_op.call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    static auto dispatch_op = create_batch_norm_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli::schema> create_bernoulli_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(bernoulli::name, bernoulli::overload_name)
      .typed<bernoulli::schema>();
}

// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli::call(const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_bernoulli_typed_handle();
    return dispatch_op.call(self, generator);
}

// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator) {
    static auto dispatch_op = create_bernoulli_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, generator);
}

// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_out::schema> create_bernoulli_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(bernoulli_out::name, bernoulli_out::overload_name)
      .typed<bernoulli_out::schema>();
}

// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_out::call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_bernoulli_out_typed_handle();
    return dispatch_op.call(self, generator, out);
}

// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static auto dispatch_op = create_bernoulli_out_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, generator, out);
}

// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli__Tensor::schema> create_bernoulli__Tensor_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(bernoulli__Tensor::name, bernoulli__Tensor::overload_name)
      .typed<bernoulli__Tensor::schema>();
}

// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__Tensor::call(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_bernoulli__Tensor_typed_handle();
    return dispatch_op.call(self, p, generator);
}

// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    static auto dispatch_op = create_bernoulli__Tensor_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli__float::schema> create_bernoulli__float_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(bernoulli__float::name, bernoulli__float::overload_name)
      .typed<bernoulli__float::schema>();
}

// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__float::call(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_bernoulli__float_typed_handle();
    return dispatch_op.call(self, p, generator);
}

// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__float::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    static auto dispatch_op = create_bernoulli__float_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_p::schema> create_bernoulli_p_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(bernoulli_p::name, bernoulli_p::overload_name)
      .typed<bernoulli_p::schema>();
}

// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli_p::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Handle lookup happens once; the static local caches it for all later calls.
    static auto dispatch_op = create_bernoulli_p_typed_handle();
    return dispatch_op.call(self, p, generator);
}

// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli_p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    static auto dispatch_op = create_bernoulli_p_typed_handle();
    return dispatch_op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<bilinear::schema> create_bilinear_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bilinear::name, bilinear::overload_name)
      .typed<bilinear::schema>();
}

// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor bilinear::call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_bilinear_typed_handle();
    return op.call(input1, input2, weight, bias);
}

// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor bilinear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_bilinear_typed_handle();
    return op.redispatch(dispatchKeySet, input1, input2, weight, bias);
}

// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_with_logits::schema> create_binary_cross_entropy_with_logits_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binary_cross_entropy_with_logits::name, binary_cross_entropy_with_logits::overload_name)
      .typed<binary_cross_entropy_with_logits::schema>();
}

// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_with_logits::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_binary_cross_entropy_with_logits_typed_handle();
    return op.call(self, target, weight, pos_weight, reduction);
}

// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_with_logits::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_binary_cross_entropy_with_logits_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction);
}

// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<bincount::schema> create_bincount_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bincount::name, bincount::overload_name)
      .typed<bincount::schema>();
}

// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
at::Tensor bincount::call(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_bincount_typed_handle();
    return op.call(self, weights, minlength);
}

// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
at::Tensor bincount::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_bincount_typed_handle();
    return op.redispatch(dispatchKeySet, self, weights, minlength);
}

// aten::_lazy_clone(Tensor self) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_lazy_clone::schema> create__lazy_clone_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_lazy_clone::name, _lazy_clone::overload_name)
      .typed<_lazy_clone::schema>();
}

// aten::_lazy_clone(Tensor self) -> Tensor
at::Tensor _lazy_clone::call(const at::Tensor & self) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create__lazy_clone_typed_handle();
    return op.call(self);
}

// aten::_lazy_clone(Tensor self) -> Tensor
at::Tensor _lazy_clone::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create__lazy_clone_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::logical_and(Tensor self, Tensor other) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<logical_and::schema> create_logical_and_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_and::name, logical_and::overload_name)
      .typed<logical_and::schema>();
}

// aten::logical_and(Tensor self, Tensor other) -> Tensor
at::Tensor logical_and::call(const at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_logical_and_typed_handle();
    return op.call(self, other);
}

// aten::logical_and(Tensor self, Tensor other) -> Tensor
at::Tensor logical_and::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_logical_and_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<logical_and_::schema> create_logical_and__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_and_::name, logical_and_::overload_name)
      .typed<logical_and_::schema>();
}

// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_and_::call(at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_logical_and__typed_handle();
    return op.call(self, other);
}

// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_and_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_logical_and__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<logical_and_out::schema> create_logical_and_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_and_out::name, logical_and_out::overload_name)
      .typed<logical_and_out::schema>();
}

// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_and_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_logical_and_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_and_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_logical_and_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::block_diag(Tensor[] tensors) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<block_diag::schema> create_block_diag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(block_diag::name, block_diag::overload_name)
      .typed<block_diag::schema>();
}

// aten::block_diag(Tensor[] tensors) -> Tensor
at::Tensor block_diag::call(at::TensorList tensors) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_block_diag_typed_handle();
    return op.call(tensors);
}

// aten::block_diag(Tensor[] tensors) -> Tensor
at::Tensor block_diag::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_block_diag_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_chunk::schema> create_unsafe_chunk_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_chunk::name, unsafe_chunk::overload_name)
      .typed<unsafe_chunk::schema>();
}

// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_chunk::call(const at::Tensor & self, int64_t chunks, int64_t dim) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_unsafe_chunk_typed_handle();
    return op.call(self, chunks, dim);
}

// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_chunk::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_unsafe_chunk_typed_handle();
    return op.redispatch(dispatchKeySet, self, chunks, dim);
}

// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<chunk::schema> create_chunk_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(chunk::name, chunk::overload_name)
      .typed<chunk::schema>();
}

// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> chunk::call(const at::Tensor & self, int64_t chunks, int64_t dim) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_chunk_typed_handle();
    return op.call(self, chunks, dim);
}

// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> chunk::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_chunk_typed_handle();
    return op.redispatch(dispatchKeySet, self, chunks, dim);
}

// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<tensor_split_sections::schema> create_tensor_split_sections_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensor_split_sections::name, tensor_split_sections::overload_name)
      .typed<tensor_split_sections::schema>();
}

// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_sections::call(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_tensor_split_sections_typed_handle();
    return op.call(self, sections, dim);
}

// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_sections::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt sections, int64_t dim) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_tensor_split_sections_typed_handle();
    return op.redispatch(dispatchKeySet, self, sections, dim);
}

// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<tensor_split_indices::schema> create_tensor_split_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensor_split_indices::name, tensor_split_indices::overload_name)
      .typed<tensor_split_indices::schema>();
}

// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_indices::call(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_tensor_split_indices_typed_handle();
    return op.call(self, indices, dim);
}

// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_tensor_split_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, dim);
}

// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<tensor_split_tensor_indices_or_sections::schema> create_tensor_split_tensor_indices_or_sections_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensor_split_tensor_indices_or_sections::name, tensor_split_tensor_indices_or_sections::overload_name)
      .typed<tensor_split_tensor_indices_or_sections::schema>();
}

// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections::call(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_tensor_split_tensor_indices_or_sections_typed_handle();
    return op.call(self, tensor_indices_or_sections, dim);
}

// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_tensor_split_tensor_indices_or_sections_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor_indices_or_sections, dim);
}

// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp::schema> create_clamp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp::name, clamp::overload_name)
      .typed<clamp::schema>();
}

// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
at::Tensor clamp::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_typed_handle();
    return op.call(self, min, max);
}

// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
at::Tensor clamp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_Tensor::schema> create_clamp_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_Tensor::name, clamp_Tensor::overload_name)
      .typed<clamp_Tensor::schema>();
}

// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
at::Tensor clamp_Tensor::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_Tensor_typed_handle();
    return op.call(self, min, max);
}

// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
at::Tensor clamp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_::schema> create_clamp__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_::name, clamp_::overload_name)
      .typed<clamp_::schema>();
}

// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
at::Tensor & clamp_::call(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp__typed_handle();
    return op.call(self, min, max);
}

// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
at::Tensor & clamp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp__typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp__Tensor::schema> create_clamp__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp__Tensor::name, clamp__Tensor::overload_name)
      .typed<clamp__Tensor::schema>();
}

// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
at::Tensor & clamp__Tensor::call(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp__Tensor_typed_handle();
    return op.call(self, min, max);
}

// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
at::Tensor & clamp__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_out::schema> create_clamp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_out::name, clamp_out::overload_name)
      .typed<clamp_out::schema>();
}

// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_out_typed_handle();
    return op.call(self, min, max, out);
}

// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_Tensor_out::schema> create_clamp_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_Tensor_out::name, clamp_Tensor_out::overload_name)
      .typed<clamp_Tensor_out::schema>();
}

// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_Tensor_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_Tensor_out_typed_handle();
    return op.call(self, min, max, out);
}

// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::clamp_max(Tensor self, Scalar max) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max::schema> create_clamp_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_max::name, clamp_max::overload_name)
      .typed<clamp_max::schema>();
}

// aten::clamp_max(Tensor self, Scalar max) -> Tensor
at::Tensor clamp_max::call(const at::Tensor & self, const at::Scalar & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_max_typed_handle();
    return op.call(self, max);
}

// aten::clamp_max(Tensor self, Scalar max) -> Tensor
at::Tensor clamp_max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_max_typed_handle();
    return op.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_Tensor::schema> create_clamp_max_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_max_Tensor::name, clamp_max_Tensor::overload_name)
      .typed<clamp_max_Tensor::schema>();
}

// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
at::Tensor clamp_max_Tensor::call(const at::Tensor & self, const at::Tensor & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_max_Tensor_typed_handle();
    return op.call(self, max);
}

// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
at::Tensor clamp_max_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_max_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_::schema> create_clamp_max__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_max_::name, clamp_max_::overload_name)
      .typed<clamp_max_::schema>();
}

// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
at::Tensor & clamp_max_::call(at::Tensor & self, const at::Scalar & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_max__typed_handle();
    return op.call(self, max);
}

// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
at::Tensor & clamp_max_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_max__typed_handle();
    return op.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max__Tensor::schema> create_clamp_max__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_max__Tensor::name, clamp_max__Tensor::overload_name)
      .typed<clamp_max__Tensor::schema>();
}

// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
at::Tensor & clamp_max__Tensor::call(at::Tensor & self, const at::Tensor & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_max__Tensor_typed_handle();
    return op.call(self, max);
}

// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
at::Tensor & clamp_max__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_max__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_out::schema> create_clamp_max_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_max_out::name, clamp_max_out::overload_name)
      .typed<clamp_max_out::schema>();
}

// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_out::call(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_max_out_typed_handle();
    return op.call(self, max, out);
}

// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_max_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, max, out);
}

// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_Tensor_out::schema> create_clamp_max_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clamp_max_Tensor_out::name, clamp_max_Tensor_out::overload_name)
      .typed<clamp_max_Tensor_out::schema>();
}

// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_Tensor_out::call(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clamp_max_Tensor_out_typed_handle();
    return op.call(self, max, out);
}

// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clamp_max_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, max, out);
}

// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clip::schema> create_clip_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clip::name, clip::overload_name)
      .typed<clip::schema>();
}

// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
at::Tensor clip::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clip_typed_handle();
    return op.call(self, min, max);
}

// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
at::Tensor clip::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clip_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clip_Tensor::schema> create_clip_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clip_Tensor::name, clip_Tensor::overload_name)
      .typed<clip_Tensor::schema>();
}

// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
at::Tensor clip_Tensor::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clip_Tensor_typed_handle();
    return op.call(self, min, max);
}

// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
at::Tensor clip_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clip_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clip_::schema> create_clip__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clip_::name, clip_::overload_name)
      .typed<clip_::schema>();
}

// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
at::Tensor & clip_::call(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clip__typed_handle();
    return op.call(self, min, max);
}

// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
at::Tensor & clip_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clip__typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clip__Tensor::schema> create_clip__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clip__Tensor::name, clip__Tensor::overload_name)
      .typed<clip__Tensor::schema>();
}

// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
at::Tensor & clip__Tensor::call(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clip__Tensor_typed_handle();
    return op.call(self, min, max);
}

// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
at::Tensor & clip__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clip__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clip_out::schema> create_clip_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clip_out::name, clip_out::overload_name)
      .typed<clip_out::schema>();
}

// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clip_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clip_out_typed_handle();
    return op.call(self, min, max, out);
}

// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clip_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clip_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<clip_Tensor_out::schema> create_clip_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clip_Tensor_out::name, clip_Tensor_out::overload_name)
      .typed<clip_Tensor_out::schema>();
}

// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clip_Tensor_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
    // Cached handle: the dispatcher schema lookup runs only on the first invocation.
    static auto op = create_clip_Tensor_out_typed_handle();
    return op.call(self, min, max, out);
}

// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clip_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
    // Re-invokes the operator through the dispatcher with the caller-supplied key set.
    static auto op = create_clip_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::cudnn_is_acceptable(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_is_acceptable::schema> create_cudnn_is_acceptable_typed_handle() {
  // Resolve the "cudnn_is_acceptable" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_is_acceptable::name, cudnn_is_acceptable::overload_name)
      .typed<cudnn_is_acceptable::schema>();
}

// aten::cudnn_is_acceptable(Tensor self) -> bool
bool cudnn_is_acceptable::call(const at::Tensor & self) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cudnn_is_acceptable_typed_handle();
    return op.call(self);
}

// aten::cudnn_is_acceptable(Tensor self) -> bool
bool cudnn_is_acceptable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cudnn_is_acceptable_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::complex(Tensor real, Tensor imag) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<complex::schema> create_complex_typed_handle() {
  // Resolve the "complex" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(complex::name, complex::overload_name)
      .typed<complex::schema>();
}

// aten::complex(Tensor real, Tensor imag) -> Tensor
at::Tensor complex::call(const at::Tensor & real, const at::Tensor & imag) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_complex_typed_handle();
    return op.call(real, imag);
}

// aten::complex(Tensor real, Tensor imag) -> Tensor
at::Tensor complex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_complex_typed_handle();
    return op.redispatch(dispatchKeySet, real, imag);
}

// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<complex_out::schema> create_complex_out_typed_handle() {
  // Resolve the "complex.out" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(complex_out::name, complex_out::overload_name)
      .typed<complex_out::schema>();
}

// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & complex_out::call(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_complex_out_typed_handle();
    return op.call(real, imag, out);
}

// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & complex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_complex_out_typed_handle();
    return op.redispatch(dispatchKeySet, real, imag, out);
}

// aten::polar(Tensor abs, Tensor angle) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<polar::schema> create_polar_typed_handle() {
  // Resolve the "polar" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polar::name, polar::overload_name)
      .typed<polar::schema>();
}

// aten::polar(Tensor abs, Tensor angle) -> Tensor
at::Tensor polar::call(const at::Tensor & abs, const at::Tensor & angle) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_polar_typed_handle();
    return op.call(abs, angle);
}

// aten::polar(Tensor abs, Tensor angle) -> Tensor
at::Tensor polar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_polar_typed_handle();
    return op.redispatch(dispatchKeySet, abs, angle);
}

// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<polar_out::schema> create_polar_out_typed_handle() {
  // Resolve the "polar.out" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polar_out::name, polar_out::overload_name)
      .typed<polar_out::schema>();
}

// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & polar_out::call(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_polar_out_typed_handle();
    return op.call(abs, angle, out);
}

// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & polar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_polar_out_typed_handle();
    return op.redispatch(dispatchKeySet, abs, angle, out);
}

// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv_transpose2d_input::schema> create_conv_transpose2d_input_typed_handle() {
  // Resolve the "conv_transpose2d.input" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_transpose2d_input::name, conv_transpose2d_input::overload_name)
      .typed<conv_transpose2d_input::schema>();
}

// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
at::Tensor conv_transpose2d_input::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_conv_transpose2d_input_typed_handle();
    return op.call(input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
at::Tensor conv_transpose2d_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_conv_transpose2d_input_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero_dim_IntList::schema> create_count_nonzero_dim_IntList_typed_handle() {
  // Resolve the "count_nonzero.dim_IntList" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(count_nonzero_dim_IntList::name, count_nonzero_dim_IntList::overload_name)
      .typed<count_nonzero_dim_IntList::schema>();
}

// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
at::Tensor count_nonzero_dim_IntList::call(const at::Tensor & self, at::IntArrayRef dim) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_count_nonzero_dim_IntList_typed_handle();
    return op.call(self, dim);
}

// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
at::Tensor count_nonzero_dim_IntList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_count_nonzero_dim_IntList_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero::schema> create_count_nonzero_typed_handle() {
  // Resolve the "count_nonzero" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(count_nonzero::name, count_nonzero::overload_name)
      .typed<count_nonzero::schema>();
}

// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
at::Tensor count_nonzero::call(const at::Tensor & self, ::std::optional<int64_t> dim) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_count_nonzero_typed_handle();
    return op.call(self, dim);
}

// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
at::Tensor count_nonzero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_count_nonzero_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cov::schema> create_cov_typed_handle() {
  // Resolve the "cov" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cov::name, cov::overload_name)
      .typed<cov::schema>();
}

// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
at::Tensor cov::call(const at::Tensor & self, int64_t correction, const ::std::optional<at::Tensor> & fweights, const ::std::optional<at::Tensor> & aweights) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cov_typed_handle();
    return op.call(self, correction, fweights, aweights);
}

// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
at::Tensor cov::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t correction, const ::std::optional<at::Tensor> & fweights, const ::std::optional<at::Tensor> & aweights) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cov_typed_handle();
    return op.redispatch(dispatchKeySet, self, correction, fweights, aweights);
}

// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_add_relu::schema> create_cudnn_convolution_add_relu_typed_handle() {
  // Resolve the "cudnn_convolution_add_relu" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_add_relu::name, cudnn_convolution_add_relu::overload_name)
      .typed<cudnn_convolution_add_relu::schema>();
}

// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor cudnn_convolution_add_relu::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cudnn_convolution_add_relu_typed_handle();
    return op.call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor cudnn_convolution_add_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cudnn_convolution_add_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax::schema> create_cummax_typed_handle() {
  // Resolve the "cummax" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax::name, cummax::overload_name)
      .typed<cummax::schema>();
}

// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax::call(const at::Tensor & self, int64_t dim) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cummax_typed_handle();
    return op.call(self, dim);
}

// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cummax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax_out::schema> create_cummax_out_typed_handle() {
  // Resolve the "cummax.out" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax_out::name, cummax_out::overload_name)
      .typed<cummax_out::schema>();
}

// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_out::call(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cummax_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cummax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax_dimname::schema> create_cummax_dimname_typed_handle() {
  // Resolve the "cummax.dimname" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax_dimname::name, cummax_dimname::overload_name)
      .typed<cummax_dimname::schema>();
}

// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax_dimname::call(const at::Tensor & self, at::Dimname dim) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cummax_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cummax_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax_dimname_out::schema> create_cummax_dimname_out_typed_handle() {
  // Resolve the "cummax.dimname_out" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax_dimname_out::name, cummax_dimname_out::overload_name)
      .typed<cummax_dimname_out::schema>();
}

// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_dimname_out::call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_cummax_dimname_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_cummax_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_cummax_helper::schema> create__cummax_helper_typed_handle() {
  // Resolve the "_cummax_helper" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cummax_helper::name, _cummax_helper::overload_name)
      .typed<_cummax_helper::schema>();
}

// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummax_helper::call(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create__cummax_helper_typed_handle();
    return op.call(self, values, indices, dim);
}

// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummax_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__cummax_helper_typed_handle();
    return op.redispatch(dispatchKeySet, self, values, indices, dim);
}

// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_backward::schema> create__ctc_loss_backward_typed_handle() {
  // Resolve the "_ctc_loss_backward" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss_backward::name, _ctc_loss_backward::overload_name)
      .typed<_ctc_loss_backward::schema>();
}

// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward::call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create__ctc_loss_backward_typed_handle();
    return op.call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__ctc_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_backward_Tensor::schema> create__ctc_loss_backward_Tensor_typed_handle() {
  // Resolve the "_ctc_loss_backward.Tensor" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss_backward_Tensor::name, _ctc_loss_backward_Tensor::overload_name)
      .typed<_ctc_loss_backward_Tensor::schema>();
}

// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward_Tensor::call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create__ctc_loss_backward_Tensor_typed_handle();
    return op.call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__ctc_loss_backward_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_backward::schema> create_diagonal_backward_typed_handle() {
  // Resolve the "diagonal_backward" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_backward::name, diagonal_backward::overload_name)
      .typed<diagonal_backward::schema>();
}

// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
at::Tensor diagonal_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_diagonal_backward_typed_handle();
    return op.call(grad_output, input_sizes, offset, dim1, dim2);
}

// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
at::Tensor diagonal_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_diagonal_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2);
}

// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diff::schema> create_diff_typed_handle() {
  // Resolve the "diff" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diff::name, diff::overload_name)
      .typed<diff::schema>();
}

// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
at::Tensor diff::call(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_diff_typed_handle();
    return op.call(self, n, dim, prepend, append);
}

// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
at::Tensor diff::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_diff_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, prepend, append);
}

// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diff_out::schema> create_diff_out_typed_handle() {
  // Resolve the "diff.out" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diff_out::name, diff_out::overload_name)
      .typed<diff_out::schema>();
}

// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diff_out::call(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append, at::Tensor & out) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_diff_out_typed_handle();
    return op.call(self, n, dim, prepend, append, out);
}

// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diff_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append, at::Tensor & out) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_diff_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, prepend, append, out);
}

// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalarint::schema> create_gradient_scalarint_typed_handle() {
  // Resolve the "gradient.scalarint" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_scalarint::name, gradient_scalarint::overload_name)
      .typed<gradient_scalarint::schema>();
}

// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarint::call(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_gradient_scalarint_typed_handle();
    return op.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_gradient_scalarint_typed_handle();
    return op.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalararray::schema> create_gradient_scalararray_typed_handle() {
  // Resolve the "gradient.scalararray" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_scalararray::name, gradient_scalararray::overload_name)
      .typed<gradient_scalararray::schema>();
}

// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalararray::call(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_gradient_scalararray_typed_handle();
    return op.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalararray::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_gradient_scalararray_typed_handle();
    return op.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_array::schema> create_gradient_array_typed_handle() {
  // Resolve the "gradient.array" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_array::name, gradient_array::overload_name)
      .typed<gradient_array::schema>();
}

// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_array::call(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_gradient_array_typed_handle();
    return op.call(self, dim, edge_order);
}

// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_gradient_array_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, edge_order);
}

// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalarrayint::schema> create_gradient_scalarrayint_typed_handle() {
  // Resolve the "gradient.scalarrayint" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_scalarrayint::name, gradient_scalarrayint::overload_name)
      .typed<gradient_scalarrayint::schema>();
}

// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayint::call(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_gradient_scalarrayint_typed_handle();
    return op.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_gradient_scalarrayint_typed_handle();
    return op.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalarrayarray::schema> create_gradient_scalarrayarray_typed_handle() {
  // Resolve the "gradient.scalarrayarray" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_scalarrayarray::name, gradient_scalarrayarray::overload_name)
      .typed<gradient_scalarrayarray::schema>();
}

// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayarray::call(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_gradient_scalarrayarray_typed_handle();
    return op.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayarray::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_gradient_scalarrayarray_typed_handle();
    return op.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_tensorarrayint::schema> create_gradient_tensorarrayint_typed_handle() {
  // Resolve the "gradient.tensorarrayint" schema in the global dispatcher and bind it to its compile-time signature.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_tensorarrayint::name, gradient_tensorarrayint::overload_name)
      .typed<gradient_tensorarrayint::schema>();
}

// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarrayint::call(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Handle is cached in a function-local static: the dispatcher lookup runs once, on first use.
    static auto op = create_gradient_tensorarrayint_typed_handle();
    return op.call(self, spacing, dim, edge_order);
}

// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarrayint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Same cached handle, but forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_gradient_tensorarrayint_typed_handle();
    return op.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<gradient_tensorarray::schema> create_gradient_tensorarray_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gradient_tensorarray::name, gradient_tensorarray::overload_name)
      .typed<gradient_tensorarray::schema>();
}

// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarray::call(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_gradient_tensorarray_typed_handle();
    return op.call(self, spacing, dim, edge_order);
}

// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarray::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_gradient_tensorarray_typed_handle();
    return op.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::dot(Tensor self, Tensor tensor) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<dot::schema> create_dot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dot::name, dot::overload_name)
      .typed<dot::schema>();
}

// aten::dot(Tensor self, Tensor tensor) -> Tensor
at::Tensor dot::call(const at::Tensor & self, const at::Tensor & tensor) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_dot_typed_handle();
    return op.call(self, tensor);
}

// aten::dot(Tensor self, Tensor tensor) -> Tensor
at::Tensor dot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_dot_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor);
}

// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<dot_out::schema> create_dot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dot_out::name, dot_out::overload_name)
      .typed<dot_out::schema>();
}

// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dot_out::call(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_dot_out_typed_handle();
    return op.call(self, tensor, out);
}

// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_dot_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor, out);
}

// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<einsum::schema> create_einsum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(einsum::name, einsum::overload_name)
      .typed<einsum::schema>();
}

// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
at::Tensor einsum::call(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_einsum_typed_handle();
    return op.call(equation, tensors, path);
}

// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
at::Tensor einsum::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_einsum_typed_handle();
    return op.redispatch(dispatchKeySet, equation, tensors, path);
}

// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_renorm_::schema> create_embedding_renorm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_renorm_::name, embedding_renorm_::overload_name)
      .typed<embedding_renorm_::schema>();
}

// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
at::Tensor & embedding_renorm_::call(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_embedding_renorm__typed_handle();
    return op.call(self, indices, max_norm, norm_type);
}

// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
at::Tensor & embedding_renorm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_embedding_renorm__typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, max_norm, norm_type);
}

// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_sparse_backward::schema> create_embedding_sparse_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_sparse_backward::name, embedding_sparse_backward::overload_name)
      .typed<embedding_sparse_backward::schema>();
}

// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_sparse_backward::call(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_embedding_sparse_backward_typed_handle();
    return op.call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_sparse_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_embedding_sparse_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_per_sample_weights_backward::schema> create__embedding_bag_per_sample_weights_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_per_sample_weights_backward::name, _embedding_bag_per_sample_weights_backward::overload_name)
      .typed<_embedding_bag_per_sample_weights_backward::schema>();
}

// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_per_sample_weights_backward::call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create__embedding_bag_per_sample_weights_backward_typed_handle();
    return op.call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
}

// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_per_sample_weights_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create__embedding_bag_per_sample_weights_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx);
}

// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_names::schema> create_empty_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_names::name, empty_names::overload_name)
      .typed<empty_names::schema>();
}

// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_names::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_empty_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_empty_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_memory_format::schema> create_empty_memory_format_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_memory_format::name, empty_memory_format::overload_name)
      .typed<empty_memory_format::schema>();
}

// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_memory_format::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_empty_memory_format_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_memory_format::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_empty_memory_format_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, memory_format);
}

// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<new_empty_strided::schema> create_new_empty_strided_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty_strided::name, new_empty_strided::overload_name)
      .typed<new_empty_strided::schema>();
}

// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty_strided::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_new_empty_strided_typed_handle();
    return op.call(self, size, stride, dtype, layout, device, pin_memory);
}

// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty_strided::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_new_empty_strided_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, dtype, layout, device, pin_memory);
}

// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<new_full::schema> create_new_full_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_full::name, new_full::overload_name)
      .typed<new_full::schema>();
}

// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_full::call(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_new_full_typed_handle();
    return op.call(self, size, fill_value, dtype, layout, device, pin_memory);
}

// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_full::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_new_full_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value, dtype, layout, device, pin_memory);
}

// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<new_ones::schema> create_new_ones_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_ones::name, new_ones::overload_name)
      .typed<new_ones::schema>();
}

// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_ones::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_new_ones_typed_handle();
    return op.call(self, size, dtype, layout, device, pin_memory);
}

// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_ones::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_new_ones_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory);
}

// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_empty_per_channel_affine_quantized::schema> create__empty_per_channel_affine_quantized_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_per_channel_affine_quantized::name, _empty_per_channel_affine_quantized::overload_name)
      .typed<_empty_per_channel_affine_quantized::schema>();
}

// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_per_channel_affine_quantized::call(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create__empty_per_channel_affine_quantized_typed_handle();
    return op.call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}

// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_per_channel_affine_quantized::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create__empty_per_channel_affine_quantized_typed_handle();
    return op.redispatch(dispatchKeySet, size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_quantized::schema> create_empty_quantized_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_quantized::name, empty_quantized::overload_name)
      .typed<empty_quantized::schema>();
}

// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_quantized::call(at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_empty_quantized_typed_handle();
    return op.call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_quantized::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_empty_quantized_typed_handle();
    return op.redispatch(dispatchKeySet, size, qtensor, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_out::schema> create_empty_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_out::name, empty_out::overload_name)
      .typed<empty_out::schema>();
}

// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_out::call(c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_empty_out_typed_handle();
    return op.call(size, memory_format, out);
}

// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_empty_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, memory_format, out);
}

// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_strided::schema> create_empty_strided_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_strided::name, empty_strided::overload_name)
      .typed<empty_strided::schema>();
}

// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor empty_strided::call(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_empty_strided_typed_handle();
    return op.call(size, stride, dtype, layout, device, pin_memory);
}

// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor empty_strided::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_empty_strided_typed_handle();
    return op.redispatch(dispatchKeySet, size, stride, dtype, layout, device, pin_memory);
}

// aten::exp(Tensor self) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<exp::schema> create_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp::name, exp::overload_name)
      .typed<exp::schema>();
}

// aten::exp(Tensor self) -> Tensor
at::Tensor exp::call(const at::Tensor & self) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_exp_typed_handle();
    return op.call(self);
}

// aten::exp(Tensor self) -> Tensor
at::Tensor exp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp_(Tensor(a!) self) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<exp_::schema> create_exp__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp_::name, exp_::overload_name)
      .typed<exp_::schema>();
}

// aten::exp_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp_::call(at::Tensor & self) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_exp__typed_handle();
    return op.call(self);
}

// aten::exp_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_exp__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<exp_out::schema> create_exp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp_out::name, exp_out::overload_name)
      .typed<exp_out::schema>();
}

// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_exp_out_typed_handle();
    return op.call(self, out);
}

// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_exp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::exp2(Tensor self) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<exp2::schema> create_exp2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp2::name, exp2::overload_name)
      .typed<exp2::schema>();
}

// aten::exp2(Tensor self) -> Tensor
at::Tensor exp2::call(const at::Tensor & self) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_exp2_typed_handle();
    return op.call(self);
}

// aten::exp2(Tensor self) -> Tensor
at::Tensor exp2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_exp2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<exp2_::schema> create_exp2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp2_::name, exp2_::overload_name)
      .typed<exp2_::schema>();
}

// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp2_::call(at::Tensor & self) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_exp2__typed_handle();
    return op.call(self);
}

// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_exp2__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<exp2_out::schema> create_exp2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp2_out::name, exp2_out::overload_name)
      .typed<exp2_out::schema>();
}

// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp2_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_exp2_out_typed_handle();
    return op.call(self, out);
}

// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_exp2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<eye::schema> create_eye_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye::name, eye::overload_name)
      .typed<eye::schema>();
}

// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye::call(c10::SymInt n, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_eye_typed_handle();
    return op.call(n, dtype, layout, device, pin_memory);
}

// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_eye_typed_handle();
    return op.redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory);
}

// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<eye_m::schema> create_eye_m_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye_m::name, eye_m::overload_name)
      .typed<eye_m::schema>();
}

// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye_m::call(c10::SymInt n, c10::SymInt m, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_eye_m_typed_handle();
    return op.call(n, m, dtype, layout, device, pin_memory);
}

// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye_m::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_eye_m_typed_handle();
    return op.redispatch(dispatchKeySet, n, m, dtype, layout, device, pin_memory);
}

// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<eye_out::schema> create_eye_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye_out::name, eye_out::overload_name)
      .typed<eye_out::schema>();
}

// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_out::call(c10::SymInt n, at::Tensor & out) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_eye_out_typed_handle();
    return op.call(n, out);
}

// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_eye_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, out);
}

// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's entry from the global dispatcher once; C10_NOINLINE
// keeps this cold, one-time setup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<eye_m_out::schema> create_eye_m_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye_m_out::name, eye_m_out::overload_name)
      .typed<eye_m_out::schema>();
}

// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_m_out::call(c10::SymInt n, c10::SymInt m, at::Tensor & out) {
    // Handle is created on first use and cached (thread-safe local static).
    static auto op = create_eye_m_out_typed_handle();
    return op.call(n, m, out);
}

// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_m_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::Tensor & out) {
    // Like call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_eye_m_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, m, out);
}

// aten::frac(Tensor self) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<frac::schema> create_frac_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(frac::name, frac::overload_name)
      .typed<frac::schema>();
}

// aten::frac(Tensor self) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor frac::call(const at::Tensor & self) {
    
    static auto op = create_frac_typed_handle();
    return op.call(self);
}

// aten::frac(Tensor self) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor frac::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_frac_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::frac_(Tensor(a!) self) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<frac_::schema> create_frac__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(frac_::name, frac_::overload_name)
      .typed<frac_::schema>();
}

// aten::frac_(Tensor(a!) self) -> Tensor(a!)
// In-place variant; public dispatch entry with a cached handle.
at::Tensor & frac_::call(at::Tensor & self) {
    
    static auto op = create_frac__typed_handle();
    return op.call(self);
}

// aten::frac_(Tensor(a!) self) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & frac_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_frac__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<frac_out::schema> create_frac_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(frac_out::name, frac_out::overload_name)
      .typed<frac_out::schema>();
}

// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & frac_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_frac_out_typed_handle();
    return op.call(self, out);
}

// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & frac_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_frac_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<from_file::schema> create_from_file_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(from_file::name, from_file::overload_name)
      .typed<from_file::schema>();
}

// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor from_file::call(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_from_file_typed_handle();
    return op.call(filename, shared, size, dtype, layout, device, pin_memory);
}

// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor from_file::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_from_file_typed_handle();
    return op.redispatch(dispatchKeySet, filename, shared, size, dtype, layout, device, pin_memory);
}

// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<gcd_out::schema> create_gcd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gcd_out::name, gcd_out::overload_name)
      .typed<gcd_out::schema>();
}

// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & gcd_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_gcd_out_typed_handle();
    return op.call(self, other, out);
}

// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & gcd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_gcd_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::gcd(Tensor self, Tensor other) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<gcd::schema> create_gcd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gcd::name, gcd::overload_name)
      .typed<gcd::schema>();
}

// aten::gcd(Tensor self, Tensor other) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor gcd::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_gcd_typed_handle();
    return op.call(self, other);
}

// aten::gcd(Tensor self, Tensor other) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor gcd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_gcd_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<gcd_::schema> create_gcd__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gcd_::name, gcd_::overload_name)
      .typed<gcd_::schema>();
}

// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// In-place variant; public dispatch entry with a cached handle.
at::Tensor & gcd_::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_gcd__typed_handle();
    return op.call(self, other);
}

// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & gcd_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_gcd__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_clear_plan_cache::schema> create__cufft_clear_plan_cache_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cufft_clear_plan_cache::name, _cufft_clear_plan_cache::overload_name)
      .typed<_cufft_clear_plan_cache::schema>();
}

// aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()
// Void operator; `return` on a void expression is well-formed C++.
void _cufft_clear_plan_cache::call(at::DeviceIndex device_index) {
    
    static auto op = create__cufft_clear_plan_cache_typed_handle();
    return op.call(device_index);
}

// aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
void _cufft_clear_plan_cache::redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) {
    
    static auto op = create__cufft_clear_plan_cache_typed_handle();
    return op.redispatch(dispatchKeySet, device_index);
}

// aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_masked_index::schema> create__unsafe_masked_index_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unsafe_masked_index::name, _unsafe_masked_index::overload_name)
      .typed<_unsafe_masked_index::schema>();
}

// aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor _unsafe_masked_index::call(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill) {
    
    static auto op = create__unsafe_masked_index_typed_handle();
    return op.call(self, mask, indices, fill);
}

// aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor _unsafe_masked_index::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill) {
    
    static auto op = create__unsafe_masked_index_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, indices, fill);
}

// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Tensor_out::schema> create_isin_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Tensor_out::name, isin_Tensor_Tensor_out::overload_name)
      .typed<isin_Tensor_Tensor_out::schema>();
}

// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & isin_Tensor_Tensor_out::call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Tensor_out_typed_handle();
    return op.call(elements, test_elements, assume_unique, invert, out);
}

// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & isin_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert, out);
}

// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Tensor::schema> create_isin_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Tensor::name, isin_Tensor_Tensor::overload_name)
      .typed<isin_Tensor_Tensor::schema>();
}

// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor isin_Tensor_Tensor::call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Tensor_typed_handle();
    return op.call(elements, test_elements, assume_unique, invert);
}

// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor isin_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert);
}

// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Scalar_out::schema> create_isin_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Scalar_out::name, isin_Tensor_Scalar_out::overload_name)
      .typed<isin_Tensor_Scalar_out::schema>();
}

// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & isin_Tensor_Scalar_out::call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Scalar_out_typed_handle();
    return op.call(elements, test_element, assume_unique, invert, out);
}

// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & isin_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_element, assume_unique, invert, out);
}

// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Scalar::schema> create_isin_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Scalar::name, isin_Tensor_Scalar::overload_name)
      .typed<isin_Tensor_Scalar::schema>();
}

// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor isin_Tensor_Scalar::call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Scalar_typed_handle();
    return op.call(elements, test_element, assume_unique, invert);
}

// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor isin_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_element, assume_unique, invert);
}

// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isin_Scalar_Tensor_out::schema> create_isin_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Scalar_Tensor_out::name, isin_Scalar_Tensor_out::overload_name)
      .typed<isin_Scalar_Tensor_out::schema>();
}

// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & isin_Scalar_Tensor_out::call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Scalar_Tensor_out_typed_handle();
    return op.call(element, test_elements, assume_unique, invert, out);
}

// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & isin_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, element, test_elements, assume_unique, invert, out);
}

// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isin_Scalar_Tensor::schema> create_isin_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Scalar_Tensor::name, isin_Scalar_Tensor::overload_name)
      .typed<isin_Scalar_Tensor::schema>();
}

// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor isin_Scalar_Tensor::call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Scalar_Tensor_typed_handle();
    return op.call(element, test_elements, assume_unique, invert);
}

// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor isin_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, element, test_elements, assume_unique, invert);
}

// aten::is_conj(Tensor self) -> bool
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<is_conj::schema> create_is_conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_conj::name, is_conj::overload_name)
      .typed<is_conj::schema>();
}

// aten::is_conj(Tensor self) -> bool
// Public dispatch entry; handle is created once and cached.
bool is_conj::call(const at::Tensor & self) {
    
    static auto op = create_is_conj_typed_handle();
    return op.call(self);
}

// aten::is_conj(Tensor self) -> bool
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
bool is_conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_is_zerotensor(Tensor self) -> bool
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_is_zerotensor::schema> create__is_zerotensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_is_zerotensor::name, _is_zerotensor::overload_name)
      .typed<_is_zerotensor::schema>();
}

// aten::_is_zerotensor(Tensor self) -> bool
// Public dispatch entry; handle is created once and cached.
bool _is_zerotensor::call(const at::Tensor & self) {
    
    static auto op = create__is_zerotensor_typed_handle();
    return op.call(self);
}

// aten::_is_zerotensor(Tensor self) -> bool
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
bool _is_zerotensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__is_zerotensor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_nonzero(Tensor self) -> bool
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<is_nonzero::schema> create_is_nonzero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_nonzero::name, is_nonzero::overload_name)
      .typed<is_nonzero::schema>();
}

// aten::is_nonzero(Tensor self) -> bool
// Public dispatch entry; handle is created once and cached.
bool is_nonzero::call(const at::Tensor & self) {
    
    static auto op = create_is_nonzero_typed_handle();
    return op.call(self);
}

// aten::is_nonzero(Tensor self) -> bool
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
bool is_nonzero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_nonzero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_signed(Tensor self) -> bool
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<is_signed::schema> create_is_signed_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_signed::name, is_signed::overload_name)
      .typed<is_signed::schema>();
}

// aten::is_signed(Tensor self) -> bool
// Public dispatch entry; handle is created once and cached.
bool is_signed::call(const at::Tensor & self) {
    
    static auto op = create_is_signed_typed_handle();
    return op.call(self);
}

// aten::is_signed(Tensor self) -> bool
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
bool is_signed::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_signed_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<layer_norm::schema> create_layer_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(layer_norm::name, layer_norm::overload_name)
      .typed<layer_norm::schema>();
}

// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor layer_norm::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
    
    static auto op = create_layer_norm_typed_handle();
    return op.call(input, normalized_shape, weight, bias, eps, cudnn_enable);
}

// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor layer_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
    
    static auto op = create_layer_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, cudnn_enable);
}

// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm_backward::schema> create_native_layer_norm_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_layer_norm_backward::name, native_layer_norm_backward::overload_name)
      .typed<native_layer_norm_backward::schema>();
}

// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Public dispatch entry; handle is created once and cached.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
    
    static auto op = create_native_layer_norm_backward_typed_handle();
    return op.call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
}

// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
    
    static auto op = create_native_layer_norm_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
}

// aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rms_norm::schema> create_rms_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rms_norm::name, rms_norm::overload_name)
      .typed<rms_norm::schema>();
}

// aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor rms_norm::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, ::std::optional<double> eps) {
    
    static auto op = create_rms_norm_typed_handle();
    return op.call(input, normalized_shape, weight, eps);
}

// aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor rms_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, ::std::optional<double> eps) {
    
    static auto op = create_rms_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, normalized_shape, weight, eps);
}

// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_fp16_weight::schema> create_fbgemm_linear_fp16_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_linear_fp16_weight::name, fbgemm_linear_fp16_weight::overload_name)
      .typed<fbgemm_linear_fp16_weight::schema>();
}

// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor fbgemm_linear_fp16_weight::call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_fp16_weight_typed_handle();
    return op.call(input, packed_weight, bias);
}

// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor fbgemm_linear_fp16_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    
    static auto op = create_fbgemm_linear_fp16_weight_typed_handle();
    return op.redispatch(dispatchKeySet, input, packed_weight, bias);
}

// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_pack_quantized_matrix::schema> create_fbgemm_pack_quantized_matrix_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_pack_quantized_matrix::name, fbgemm_pack_quantized_matrix::overload_name)
      .typed<fbgemm_pack_quantized_matrix::schema>();
}

// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor fbgemm_pack_quantized_matrix::call(const at::Tensor & input) {
    
    static auto op = create_fbgemm_pack_quantized_matrix_typed_handle();
    return op.call(input);
}

// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor fbgemm_pack_quantized_matrix::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {
    
    static auto op = create_fbgemm_pack_quantized_matrix_typed_handle();
    return op.redispatch(dispatchKeySet, input);
}

// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_pack_quantized_matrix_KN::schema> create_fbgemm_pack_quantized_matrix_KN_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fbgemm_pack_quantized_matrix_KN::name, fbgemm_pack_quantized_matrix_KN::overload_name)
      .typed<fbgemm_pack_quantized_matrix_KN::schema>();
}

// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor fbgemm_pack_quantized_matrix_KN::call(const at::Tensor & input, int64_t K, int64_t N) {
    
    static auto op = create_fbgemm_pack_quantized_matrix_KN_typed_handle();
    return op.call(input, K, N);
}

// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor fbgemm_pack_quantized_matrix_KN::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t K, int64_t N) {
    
    static auto op = create_fbgemm_pack_quantized_matrix_KN_typed_handle();
    return op.redispatch(dispatchKeySet, input, K, N);
}

// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ldexp_Tensor::schema> create_ldexp_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ldexp_Tensor::name, ldexp_Tensor::overload_name)
      .typed<ldexp_Tensor::schema>();
}

// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor ldexp_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ldexp_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor ldexp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ldexp_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ldexp_::schema> create_ldexp__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ldexp_::name, ldexp_::overload_name)
      .typed<ldexp_::schema>();
}

// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// In-place variant; public dispatch entry with a cached handle.
at::Tensor & ldexp_::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ldexp__typed_handle();
    return op.call(self, other);
}

// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & ldexp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ldexp__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ldexp_out::schema> create_ldexp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ldexp_out::name, ldexp_out::overload_name)
      .typed<ldexp_out::schema>();
}

// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & ldexp_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_ldexp_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & ldexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_ldexp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::log(Tensor self) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<log::schema> create_log_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log::name, log::overload_name)
      .typed<log::schema>();
}

// aten::log(Tensor self) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor log::call(const at::Tensor & self) {
    
    static auto op = create_log_typed_handle();
    return op.call(self);
}

// aten::log(Tensor self) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor log::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log_(Tensor(a!) self) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_::schema> create_log__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_::name, log_::overload_name)
      .typed<log_::schema>();
}

// aten::log_(Tensor(a!) self) -> Tensor(a!)
// In-place variant; public dispatch entry with a cached handle.
at::Tensor & log_::call(at::Tensor & self) {
    
    static auto op = create_log__typed_handle();
    return op.call(self);
}

// aten::log_(Tensor(a!) self) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & log_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_log__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_out::schema> create_log_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_out::name, log_out::overload_name)
      .typed<log_out::schema>();
}

// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant; public dispatch entry with a cached handle.
at::Tensor & log_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_out_typed_handle();
    return op.call(self, out);
}

// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor & log_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::log2(Tensor self) -> Tensor
// Resolves the registered schema (throws if missing) into a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<log2::schema> create_log2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log2::name, log2::overload_name)
      .typed<log2::schema>();
}

// aten::log2(Tensor self) -> Tensor
// Public dispatch entry; handle is created once and cached.
at::Tensor log2::call(const at::Tensor & self) {
    
    static auto op = create_log2_typed_handle();
    return op.call(self);
}

// aten::log2(Tensor self) -> Tensor
// Re-dispatch entry: uses the caller-supplied DispatchKeySet.
at::Tensor log2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log2_(Tensor(a!) self) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<log2_::schema> create_log2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log2_::name, log2_::overload_name)
      .typed<log2_::schema>();
}

// aten::log2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log2_::call(at::Tensor & self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_log2__typed_handle();
    return op.call(self);
}

// aten::log2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_log2__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<log2_out::schema> create_log2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log2_out::name, log2_out::overload_name)
      .typed<log2_out::schema>();
}

// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log2_out::call(const at::Tensor & self, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_log2_out_typed_handle();
    return op.call(self, out);
}

// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_log2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp_out::schema> create_logaddexp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp_out::name, logaddexp_out::overload_name)
      .typed<logaddexp_out::schema>();
}

// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logaddexp_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logaddexp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::logaddexp(Tensor self, Tensor other) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp::schema> create_logaddexp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp::name, logaddexp::overload_name)
      .typed<logaddexp::schema>();
}

// aten::logaddexp(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp::call(const at::Tensor & self, const at::Tensor & other) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logaddexp_typed_handle();
    return op.call(self, other);
}

// aten::logaddexp(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logaddexp_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace::schema> create_logspace_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace::name, logspace::overload_name)
      .typed<logspace::schema>();
}

// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Tensor::schema> create_logspace_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Tensor_Tensor::name, logspace_Tensor_Tensor::overload_name)
      .typed<logspace_Tensor_Tensor::schema>();
}

// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Tensor::call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_Tensor_Tensor_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Scalar::schema> create_logspace_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Tensor_Scalar::name, logspace_Tensor_Scalar::overload_name)
      .typed<logspace_Tensor_Scalar::schema>();
}

// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Scalar::call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_Tensor_Scalar_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Scalar_Tensor::schema> create_logspace_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Scalar_Tensor::name, logspace_Scalar_Tensor::overload_name)
      .typed<logspace_Scalar_Tensor::schema>();
}

// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Scalar_Tensor::call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_Scalar_Tensor_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_out::schema> create_logspace_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_out::name, logspace_out::overload_name)
      .typed<logspace_out::schema>();
}

// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_out::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_out_typed_handle();
    return op.call(start, end, steps, base, out);
}

// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Tensor_out::schema> create_logspace_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Tensor_Tensor_out::name, logspace_Tensor_Tensor_out::overload_name)
      .typed<logspace_Tensor_Tensor_out::schema>();
}

// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Tensor_out::call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_Tensor_Tensor_out_typed_handle();
    return op.call(start, end, steps, base, out);
}

// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Scalar_out::schema> create_logspace_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Tensor_Scalar_out::name, logspace_Tensor_Scalar_out::overload_name)
      .typed<logspace_Tensor_Scalar_out::schema>();
}

// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Scalar_out::call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_Tensor_Scalar_out_typed_handle();
    return op.call(start, end, steps, base, out);
}

// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Scalar_Tensor_out::schema> create_logspace_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Scalar_Tensor_out::name, logspace_Scalar_Tensor_out::overload_name)
      .typed<logspace_Scalar_Tensor_out::schema>();
}

// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Scalar_Tensor_out::call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_logspace_Scalar_Tensor_out_typed_handle();
    return op.call(start, end, steps, base, out);
}

// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_logspace_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<log_softmax_int::schema> create_log_softmax_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_softmax_int::name, log_softmax_int::overload_name)
      .typed<log_softmax_int::schema>();
}

// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_int::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_log_softmax_int_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_log_softmax_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<log_softmax_int_out::schema> create_log_softmax_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_softmax_int_out::name, log_softmax_int_out::overload_name)
      .typed<log_softmax_int_out::schema>();
}

// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_softmax_int_out::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_log_softmax_int_out_typed_handle();
    return op.call(self, dim, dtype, out);
}

// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_softmax_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_log_softmax_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<log_softmax_Dimname::schema> create_log_softmax_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_softmax_Dimname::name, log_softmax_Dimname::overload_name)
      .typed<log_softmax_Dimname::schema>();
}

// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_log_softmax_Dimname_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_log_softmax_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::matrix_power(Tensor self, int n) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<matrix_power::schema> create_matrix_power_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matrix_power::name, matrix_power::overload_name)
      .typed<matrix_power::schema>();
}

// aten::matrix_power(Tensor self, int n) -> Tensor
at::Tensor matrix_power::call(const at::Tensor & self, int64_t n) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_matrix_power_typed_handle();
    return op.call(self, n);
}

// aten::matrix_power(Tensor self, int n) -> Tensor
at::Tensor matrix_power::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_matrix_power_typed_handle();
    return op.redispatch(dispatchKeySet, self, n);
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<matrix_power_out::schema> create_matrix_power_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matrix_power_out::name, matrix_power_out::overload_name)
      .typed<matrix_power_out::schema>();
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matrix_power_out::call(const at::Tensor & self, int64_t n, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_matrix_power_out_typed_handle();
    return op.call(self, n, out);
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matrix_power_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_matrix_power_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, out);
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d::schema> create_mkldnn_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d::name, mkldnn_max_pool3d::overload_name)
      .typed<mkldnn_max_pool3d::schema>();
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_mkldnn_max_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_mkldnn_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool3d::schema> create_quantized_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool3d::name, quantized_max_pool3d::overload_name)
      .typed<quantized_max_pool3d::schema>();
}

// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_quantized_max_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_quantized_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_backward::schema> create_mps_convolution_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mps_convolution_backward::name, mps_convolution_backward::overload_name)
      .typed<mps_convolution_backward::schema>();
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_mps_convolution_backward_typed_handle();
    return op.call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_mps_convolution_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask);
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer::schema> create_mkldnn_rnn_layer_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_rnn_layer::name, mkldnn_rnn_layer::overload_name)
      .typed<mkldnn_rnn_layer::schema>();
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer::call(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_mkldnn_rnn_layer_typed_handle();
    return op.call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_mkldnn_rnn_layer_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution::schema> create_miopen_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution::name, miopen_convolution::overload_name)
      .typed<miopen_convolution::schema>();
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_miopen_convolution_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_miopen_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
// Lazily resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn::schema> create_miopen_rnn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_rnn::name, miopen_rnn::overload_name)
      .typed<miopen_rnn::schema>();
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_miopen_rnn_typed_handle();
    return op.call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
    
    // Same cached handle; dispatch proceeds with the caller-supplied DispatchKeySet.
    static auto op = create_miopen_rnn_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::_convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_convert_weight_to_int4pack_for_cpu::schema> create__convert_weight_to_int4pack_for_cpu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convert_weight_to_int4pack_for_cpu::name, _convert_weight_to_int4pack_for_cpu::overload_name)
      .typed<_convert_weight_to_int4pack_for_cpu::schema>();
}

// aten::_convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
at::Tensor _convert_weight_to_int4pack_for_cpu::call(const at::Tensor & self, int64_t innerKTiles) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__convert_weight_to_int4pack_for_cpu_typed_handle();
    return op.call(self, innerKTiles);
}

// aten::_convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
at::Tensor _convert_weight_to_int4pack_for_cpu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t innerKTiles) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__convert_weight_to_int4pack_for_cpu_typed_handle();
    return op.redispatch(dispatchKeySet, self, innerKTiles);
}

// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm::schema> create__sparse_mm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm::name, _sparse_mm::overload_name)
      .typed<_sparse_mm::schema>();
}

// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
at::Tensor _sparse_mm::call(const at::Tensor & sparse, const at::Tensor & dense) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__sparse_mm_typed_handle();
    return op.call(sparse, dense);
}

// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
at::Tensor _sparse_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__sparse_mm_typed_handle();
    return op.redispatch(dispatchKeySet, sparse, dense);
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm_reduce::schema> create__sparse_mm_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm_reduce::name, _sparse_mm_reduce::overload_name)
      .typed<_sparse_mm_reduce::schema>();
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
at::Tensor _sparse_mm_reduce::call(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__sparse_mm_reduce_typed_handle();
    return op.call(sparse, dense, reduce);
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
at::Tensor _sparse_mm_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__sparse_mm_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, sparse, dense, reduce);
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sparse_matmul::schema> create__sparse_sparse_matmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sparse_matmul::name, _sparse_sparse_matmul::overload_name)
      .typed<_sparse_sparse_matmul::schema>();
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
at::Tensor _sparse_sparse_matmul::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__sparse_sparse_matmul_typed_handle();
    return op.call(self, other);
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
at::Tensor _sparse_sparse_matmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__sparse_sparse_matmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit::schema> create__native_batch_norm_legit_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_batch_norm_legit::name, _native_batch_norm_legit::overload_name)
      .typed<_native_batch_norm_legit::schema>();
}

// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__native_batch_norm_legit_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__native_batch_norm_legit_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_out::schema> create__native_batch_norm_legit_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_batch_norm_legit_out::name, _native_batch_norm_legit_out::overload_name)
      .typed<_native_batch_norm_legit_out::schema>();
}

// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__native_batch_norm_legit_out_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__native_batch_norm_legit_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_no_stats::schema> create__native_batch_norm_legit_no_stats_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_batch_norm_legit_no_stats::name, _native_batch_norm_legit_no_stats::overload_name)
      .typed<_native_batch_norm_legit_no_stats::schema>();
}

// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__native_batch_norm_legit_no_stats_typed_handle();
    return op.call(input, weight, bias, training, momentum, eps);
}

// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__native_batch_norm_legit_no_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps);
}

// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_no_stats_out::schema> create__native_batch_norm_legit_no_stats_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_batch_norm_legit_no_stats_out::name, _native_batch_norm_legit_no_stats_out::overload_name)
      .typed<_native_batch_norm_legit_no_stats_out::schema>();
}

// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_stats_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__native_batch_norm_legit_no_stats_out_typed_handle();
    return op.call(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__native_batch_norm_legit_no_stats_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_update_stats::schema> create_batch_norm_update_stats_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(batch_norm_update_stats::name, batch_norm_update_stats::overload_name)
      .typed<batch_norm_update_stats::schema>();
}

// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats::call(const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_batch_norm_update_stats_typed_handle();
    return op.call(input, running_mean, running_var, momentum);
}

// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_batch_norm_update_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, running_mean, running_var, momentum);
}

// aten::_nnpack_available() -> bool
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nnpack_available::schema> create__nnpack_available_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nnpack_available::name, _nnpack_available::overload_name)
      .typed<_nnpack_available::schema>();
}

// aten::_nnpack_available() -> bool
bool _nnpack_available::call() {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__nnpack_available_typed_handle();
    return op.call();
}

// aten::_nnpack_available() -> bool
bool _nnpack_available::redispatch(c10::DispatchKeySet dispatchKeySet) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__nnpack_available_typed_handle();
    return op.redispatch(dispatchKeySet);
}

// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ones_like::schema> create_ones_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_like::name, ones_like::overload_name)
      .typed<ones_like::schema>();
}

// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor ones_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_ones_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor ones_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_ones_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_euclidean_dist::schema> create__euclidean_dist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_euclidean_dist::name, _euclidean_dist::overload_name)
      .typed<_euclidean_dist::schema>();
}

// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
at::Tensor _euclidean_dist::call(const at::Tensor & x1, const at::Tensor & x2) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__euclidean_dist_typed_handle();
    return op.call(x1, x2);
}

// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
at::Tensor _euclidean_dist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__euclidean_dist_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2);
}

// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_backward::schema> create__cdist_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cdist_backward::name, _cdist_backward::overload_name)
      .typed<_cdist_backward::schema>();
}

// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
at::Tensor _cdist_backward::call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__cdist_backward_typed_handle();
    return op.call(grad, x1, x2, p, cdist);
}

// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
at::Tensor _cdist_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__cdist_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, x1, x2, p, cdist);
}

// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_forward::schema> create__pdist_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pdist_forward::name, _pdist_forward::overload_name)
      .typed<_pdist_forward::schema>();
}

// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
at::Tensor _pdist_forward::call(const at::Tensor & self, double p) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__pdist_forward_typed_handle();
    return op.call(self, p);
}

// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
at::Tensor _pdist_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create__pdist_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<native_channel_shuffle::schema> create_native_channel_shuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_channel_shuffle::name, native_channel_shuffle::overload_name)
      .typed<native_channel_shuffle::schema>();
}

// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
at::Tensor native_channel_shuffle::call(const at::Tensor & self, c10::SymInt groups) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_native_channel_shuffle_typed_handle();
    return op.call(self, groups);
}

// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
at::Tensor native_channel_shuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_native_channel_shuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, groups);
}

// aten::rad2deg(Tensor self) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rad2deg::schema> create_rad2deg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rad2deg::name, rad2deg::overload_name)
      .typed<rad2deg::schema>();
}

// aten::rad2deg(Tensor self) -> Tensor
at::Tensor rad2deg::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rad2deg_typed_handle();
    return op.call(self);
}

// aten::rad2deg(Tensor self) -> Tensor
at::Tensor rad2deg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rad2deg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rad2deg_::schema> create_rad2deg__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rad2deg_::name, rad2deg_::overload_name)
      .typed<rad2deg_::schema>();
}

// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & rad2deg_::call(at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rad2deg__typed_handle();
    return op.call(self);
}

// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & rad2deg_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rad2deg__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rad2deg_out::schema> create_rad2deg_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rad2deg_out::name, rad2deg_out::overload_name)
      .typed<rad2deg_out::schema>();
}

// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rad2deg_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rad2deg_out_typed_handle();
    return op.call(self, out);
}

// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rad2deg_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rad2deg_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<scalar_tensor::schema> create_scalar_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scalar_tensor::name, scalar_tensor::overload_name)
      .typed<scalar_tensor::schema>();
}

// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor scalar_tensor::call(const at::Scalar & s, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_scalar_tensor_typed_handle();
    return op.call(s, dtype, layout, device, pin_memory);
}

// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor scalar_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_scalar_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, s, dtype, layout, device, pin_memory);
}

// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_names::schema> create_rand_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_names::name, rand_names::overload_name)
      .typed<rand_names::schema>();
}

// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand_names::call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rand_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rand_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator_with_names::schema> create_rand_generator_with_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_generator_with_names::name, rand_generator_with_names::overload_name)
      .typed<rand_generator_with_names::schema>();
}

// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand_generator_with_names::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rand_generator_with_names_typed_handle();
    return op.call(size, generator, names, dtype, layout, device, pin_memory);
}

// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand_generator_with_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rand_generator_with_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory);
}

// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand::schema> create_rand_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand::name, rand::overload_name)
      .typed<rand::schema>();
}

// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rand_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rand_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator::schema> create_rand_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_generator::name, rand_generator::overload_name)
      .typed<rand_generator::schema>();
}

// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand_generator::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rand_generator_typed_handle();
    return op.call(size, generator, dtype, layout, device, pin_memory);
}

// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor rand_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rand_generator_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory);
}

// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_out::schema> create_rand_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_out::name, rand_out::overload_name)
      .typed<rand_out::schema>();
}

// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rand_out_typed_handle();
    return op.call(size, out);
}

// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rand_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the dispatcher singleton and returns a statically-typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator_out::schema> create_rand_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_generator_out::name, rand_generator_out::overload_name)
      .typed<rand_generator_out::schema>();
}

// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_generator_out::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_rand_generator_out_typed_handle();
    return op.call(size, generator, out);
}

// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Dispatches through the cached operator handle using the caller-supplied key set.
    static auto op = create_rand_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, out);
}

// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randint::schema> create_randint_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint::name, randint::overload_name).typed<randint::schema>();
}

// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint::call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_typed_handle();
    return handle.call(high, size, dtype, layout, device, pin_memory);
}

// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_typed_handle();
    return handle.redispatch(dispatchKeySet, high, size, dtype, layout, device, pin_memory);
}

// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randint_generator::schema> create_randint_generator_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_generator::name, randint_generator::overload_name).typed<randint_generator::schema>();
}

// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint_generator::call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_generator_typed_handle();
    return handle.call(high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_generator_typed_handle();
    return handle.redispatch(dispatchKeySet, high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randint_low::schema> create_randint_low_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_low::name, randint_low::overload_name).typed<randint_low::schema>();
}

// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint_low::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_low_typed_handle();
    return handle.call(low, high, size, dtype, layout, device, pin_memory);
}

// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint_low::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_low_typed_handle();
    return handle.redispatch(dispatchKeySet, low, high, size, dtype, layout, device, pin_memory);
}

// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randint_low_generator::schema> create_randint_low_generator_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_low_generator::name, randint_low_generator::overload_name).typed<randint_low_generator::schema>();
}

// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint_low_generator::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_low_generator_typed_handle();
    return handle.call(low, high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randint_low_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_low_generator_typed_handle();
    return handle.redispatch(dispatchKeySet, low, high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_out::schema> create_randint_out_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_out::name, randint_out::overload_name).typed<randint_out::schema>();
}

// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_out::call(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_out_typed_handle();
    return handle.call(high, size, out);
}

// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_out_typed_handle();
    return handle.redispatch(dispatchKeySet, high, size, out);
}

// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_generator_out::schema> create_randint_generator_out_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_generator_out::name, randint_generator_out::overload_name).typed<randint_generator_out::schema>();
}

// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_generator_out::call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_generator_out_typed_handle();
    return handle.call(high, size, generator, out);
}

// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_generator_out_typed_handle();
    return handle.redispatch(dispatchKeySet, high, size, generator, out);
}

// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_low_out::schema> create_randint_low_out_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_low_out::name, randint_low_out::overload_name).typed<randint_low_out::schema>();
}

// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_low_out::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_low_out_typed_handle();
    return handle.call(low, high, size, out);
}

// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_low_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_low_out_typed_handle();
    return handle.redispatch(dispatchKeySet, low, high, size, out);
}

// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_low_generator_out::schema> create_randint_low_generator_out_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randint_low_generator_out::name, randint_low_generator_out::overload_name).typed<randint_low_generator_out::schema>();
}

// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_low_generator_out::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randint_low_generator_out_typed_handle();
    return handle.call(low, high, size, generator, out);
}

// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_low_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randint_low_generator_out_typed_handle();
    return handle.redispatch(dispatchKeySet, low, high, size, generator, out);
}

// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randn_like::schema> create_randn_like_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randn_like::name, randn_like::overload_name).typed<randn_like::schema>();
}

// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor randn_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_randn_like_typed_handle();
    return handle.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor randn_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_randn_like_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<repeat::schema> create_repeat_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(repeat::name, repeat::overload_name).typed<repeat::schema>();
}

// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
at::Tensor repeat::call(const at::Tensor & self, c10::SymIntArrayRef repeats) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_repeat_typed_handle();
    return handle.call(self, repeats);
}

// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
at::Tensor repeat::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_repeat_typed_handle();
    return handle.redispatch(dispatchKeySet, self, repeats);
}

// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_Tensor::schema> create_repeat_interleave_Tensor_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(repeat_interleave_Tensor::name, repeat_interleave_Tensor::overload_name).typed<repeat_interleave_Tensor::schema>();
}

// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
at::Tensor repeat_interleave_Tensor::call(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_repeat_interleave_Tensor_typed_handle();
    return handle.call(repeats, output_size);
}

// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
at::Tensor repeat_interleave_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_repeat_interleave_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, repeats, output_size);
}

// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_self_Tensor::schema> create_repeat_interleave_self_Tensor_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(repeat_interleave_self_Tensor::name, repeat_interleave_self_Tensor::overload_name).typed<repeat_interleave_self_Tensor::schema>();
}

// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
at::Tensor repeat_interleave_self_Tensor::call(const at::Tensor & self, const at::Tensor & repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_repeat_interleave_self_Tensor_typed_handle();
    return handle.call(self, repeats, dim, output_size);
}

// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
at::Tensor repeat_interleave_self_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_repeat_interleave_self_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, repeats, dim, output_size);
}

// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_self_int::schema> create_repeat_interleave_self_int_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(repeat_interleave_self_int::name, repeat_interleave_self_int::overload_name).typed<repeat_interleave_self_int::schema>();
}

// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
at::Tensor repeat_interleave_self_int::call(const at::Tensor & self, c10::SymInt repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_repeat_interleave_self_int_typed_handle();
    return handle.call(self, repeats, dim, output_size);
}

// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
at::Tensor repeat_interleave_self_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_repeat_interleave_self_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, repeats, dim, output_size);
}

// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_reshape::schema> create__mkldnn_reshape_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_mkldnn_reshape::name, _mkldnn_reshape::overload_name).typed<_mkldnn_reshape::schema>();
}

// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
at::Tensor _mkldnn_reshape::call(const at::Tensor & self, at::IntArrayRef shape) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create__mkldnn_reshape_typed_handle();
    return handle.call(self, shape);
}

// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
at::Tensor _mkldnn_reshape::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create__mkldnn_reshape_typed_handle();
    return handle.redispatch(dispatchKeySet, self, shape);
}

// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_prelu_kernel::schema> create__prelu_kernel_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_prelu_kernel::name, _prelu_kernel::overload_name).typed<_prelu_kernel::schema>();
}

// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
at::Tensor _prelu_kernel::call(const at::Tensor & self, const at::Tensor & weight) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create__prelu_kernel_typed_handle();
    return handle.call(self, weight);
}

// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
at::Tensor _prelu_kernel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create__prelu_kernel_typed_handle();
    return handle.redispatch(dispatchKeySet, self, weight);
}

// aten::rsqrt(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rsqrt::schema> create_rsqrt_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rsqrt::name, rsqrt::overload_name).typed<rsqrt::schema>();
}

// aten::rsqrt(Tensor self) -> Tensor
at::Tensor rsqrt::call(const at::Tensor & self) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_rsqrt_typed_handle();
    return handle.call(self);
}

// aten::rsqrt(Tensor self) -> Tensor
at::Tensor rsqrt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_rsqrt_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rsqrt_::schema> create_rsqrt__typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rsqrt_::name, rsqrt_::overload_name).typed<rsqrt_::schema>();
}

// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & rsqrt_::call(at::Tensor & self) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_rsqrt__typed_handle();
    return handle.call(self);
}

// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & rsqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_rsqrt__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rsqrt_out::schema> create_rsqrt_out_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rsqrt_out::name, rsqrt_out::overload_name).typed<rsqrt_out::schema>();
}

// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rsqrt_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_rsqrt_out_typed_handle();
    return handle.call(self, out);
}

// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rsqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_rsqrt_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_select_backward::schema> create__nested_select_backward_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_select_backward::name, _nested_select_backward::overload_name).typed<_nested_select_backward::schema>();
}

// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
at::Tensor _nested_select_backward::call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create__nested_select_backward_typed_handle();
    return handle.call(grad_output, self, dim, index);
}

// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
at::Tensor _nested_select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create__nested_select_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, dim, index);
}

// aten::sym_size.int(Tensor self, int dim) -> SymInt
static C10_NOINLINE c10::TypedOperatorHandle<sym_size_int::schema> create_sym_size_int_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sym_size_int::name, sym_size_int::overload_name).typed<sym_size_int::schema>();
}

// aten::sym_size.int(Tensor self, int dim) -> SymInt
c10::SymInt sym_size_int::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_sym_size_int_typed_handle();
    return handle.call(self, dim);
}

// aten::sym_size.int(Tensor self, int dim) -> SymInt
c10::SymInt sym_size_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_sym_size_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<vsplit_int::schema> create_vsplit_int_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(vsplit_int::name, vsplit_int::overload_name).typed<vsplit_int::schema>();
}

// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_int::call(const at::Tensor & self, int64_t sections) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_vsplit_int_typed_handle();
    return handle.call(self, sections);
}

// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_vsplit_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, sections);
}

// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
static C10_NOINLINE c10::TypedOperatorHandle<vsplit_array::schema> create_vsplit_array_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(vsplit_array::name, vsplit_array::overload_name).typed<vsplit_array::schema>();
}

// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_array::call(const at::Tensor & self, at::IntArrayRef indices) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_vsplit_array_typed_handle();
    return handle.call(self, indices);
}

// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_vsplit_array_typed_handle();
    return handle.redispatch(dispatchKeySet, self, indices);
}

// aten::hstack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hstack::schema> create_hstack_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hstack::name, hstack::overload_name).typed<hstack::schema>();
}

// aten::hstack(Tensor[] tensors) -> Tensor
at::Tensor hstack::call(at::TensorList tensors) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_hstack_typed_handle();
    return handle.call(tensors);
}

// aten::hstack(Tensor[] tensors) -> Tensor
at::Tensor hstack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_hstack_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hstack_out::schema> create_hstack_out_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hstack_out::name, hstack_out::overload_name).typed<hstack_out::schema>();
}

// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hstack_out::call(at::TensorList tensors, at::Tensor & out) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_hstack_out_typed_handle();
    return handle.call(tensors, out);
}

// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hstack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_hstack_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, out);
}

// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<istft::schema> create_istft_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(istft::name, istft::overload_name).typed<istft::schema>();
}

// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
at::Tensor istft::call(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_istft_typed_handle();
    return handle.call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}

// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
at::Tensor istft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_istft_typed_handle();
    return handle.redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}

// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum::schema> create_sum_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum::name, sum::overload_name).typed<sum::schema>();
}

// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_sum_typed_handle();
    return handle.call(self, dtype);
}

// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_sum_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype);
}

// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum_dim_IntList::schema> create_sum_dim_IntList_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_dim_IntList::name, sum_dim_IntList::overload_name).typed<sum_dim_IntList::schema>();
}

// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_IntList::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_sum_dim_IntList_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_IntList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_sum_dim_IntList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum_dim_DimnameList::schema> create_sum_dim_DimnameList_typed_handle() {
  // Look up the schema in the global dispatcher and bind its static type.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_dim_DimnameList::name, sum_dim_DimnameList::overload_name).typed<sum_dim_DimnameList::schema>();
}

// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_DimnameList::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once and cached in a function-local static.
    static auto handle = create_sum_dim_DimnameList_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_DimnameList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enters dispatch with the caller-supplied key set.
    static auto handle = create_sum_dim_DimnameList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for sum.IntList_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sum_IntList_out::schema> create_sum_IntList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_IntList_out::name, sum_IntList_out::overload_name).typed<sum_IntList_out::schema>();
}

// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_IntList_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_sum_IntList_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_IntList_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto handle = create_sum_IntList_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for sum.DimnameList_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sum_DimnameList_out::schema> create_sum_DimnameList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_DimnameList_out::name, sum_DimnameList_out::overload_name).typed<sum_DimnameList_out::schema>();
}

// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_DimnameList_out::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_sum_DimnameList_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_DimnameList_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto handle = create_sum_DimnameList_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Looks up the dispatcher schema for nansum and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<nansum::schema> create_nansum_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nansum::name, nansum::overload_name).typed<nansum::schema>();
}

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nansum::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_nansum_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nansum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    static auto handle = create_nansum_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for nansum.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<nansum_out::schema> create_nansum_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nansum_out::name, nansum_out::overload_name).typed<nansum_out::schema>();
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nansum_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_nansum_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nansum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto handle = create_nansum_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::flipud(Tensor self) -> Tensor
// Looks up the dispatcher schema for flipud and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<flipud::schema> create_flipud_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(flipud::name, flipud::overload_name).typed<flipud::schema>();
}

// aten::flipud(Tensor self) -> Tensor
at::Tensor flipud::call(const at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_flipud_typed_handle();
    return handle.call(self);
}

// aten::flipud(Tensor self) -> Tensor
at::Tensor flipud::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_flipud_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
// Looks up the dispatcher schema for rot90 and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rot90::schema> create_rot90_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rot90::name, rot90::overload_name).typed<rot90::schema>();
}

// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
at::Tensor rot90::call(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_rot90_typed_handle();
    return handle.call(self, k, dims);
}

// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
at::Tensor rot90::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
    static auto handle = create_rot90_typed_handle();
    return handle.redispatch(dispatchKeySet, self, k, dims);
}

// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
// Looks up the dispatcher schema for trapz.x and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<trapz_x::schema> create_trapz_x_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trapz_x::name, trapz_x::overload_name).typed<trapz_x::schema>();
}

// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor trapz_x::call(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_trapz_x_typed_handle();
    return handle.call(y, x, dim);
}

// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor trapz_x::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto handle = create_trapz_x_typed_handle();
    return handle.redispatch(dispatchKeySet, y, x, dim);
}

// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
// Looks up the dispatcher schema for trapz.dx and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<trapz_dx::schema> create_trapz_dx_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trapz_dx::name, trapz_dx::overload_name).typed<trapz_dx::schema>();
}

// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
at::Tensor trapz_dx::call(const at::Tensor & y, double dx, int64_t dim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_trapz_dx_typed_handle();
    return handle.call(y, dx, dim);
}

// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
at::Tensor trapz_dx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, double dx, int64_t dim) {
    static auto handle = create_trapz_dx_typed_handle();
    return handle.redispatch(dispatchKeySet, y, dx, dim);
}

// aten::_nested_tensor_strides(Tensor self) -> Tensor
// Looks up the dispatcher schema for _nested_tensor_strides and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_strides::schema> create__nested_tensor_strides_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_strides::name, _nested_tensor_strides::overload_name).typed<_nested_tensor_strides::schema>();
}

// aten::_nested_tensor_strides(Tensor self) -> Tensor
at::Tensor _nested_tensor_strides::call(const at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create__nested_tensor_strides_typed_handle();
    return handle.call(self);
}

// aten::_nested_tensor_strides(Tensor self) -> Tensor
at::Tensor _nested_tensor_strides::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_tensor_strides_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor
// Looks up the dispatcher schema for _nested_tensor_storage_offsets and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_storage_offsets::schema> create__nested_tensor_storage_offsets_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_storage_offsets::name, _nested_tensor_storage_offsets::overload_name).typed<_nested_tensor_storage_offsets::schema>();
}

// aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor
at::Tensor _nested_tensor_storage_offsets::call(const at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create__nested_tensor_storage_offsets_typed_handle();
    return handle.call(self);
}

// aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor
at::Tensor _nested_tensor_storage_offsets::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_tensor_storage_offsets_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_ragged_idx(Tensor self) -> int
// Looks up the dispatcher schema for _nested_get_ragged_idx and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_ragged_idx::schema> create__nested_get_ragged_idx_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_ragged_idx::name, _nested_get_ragged_idx::overload_name).typed<_nested_get_ragged_idx::schema>();
}

// aten::_nested_get_ragged_idx(Tensor self) -> int
int64_t _nested_get_ragged_idx::call(const at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create__nested_get_ragged_idx_typed_handle();
    return handle.call(self);
}

// aten::_nested_get_ragged_idx(Tensor self) -> int
int64_t _nested_get_ragged_idx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_get_ragged_idx_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_min_seqlen(Tensor self) -> Tensor
// Looks up the dispatcher schema for _nested_get_min_seqlen and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_min_seqlen::schema> create__nested_get_min_seqlen_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_min_seqlen::name, _nested_get_min_seqlen::overload_name).typed<_nested_get_min_seqlen::schema>();
}

// aten::_nested_get_min_seqlen(Tensor self) -> Tensor
at::Tensor _nested_get_min_seqlen::call(const at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create__nested_get_min_seqlen_typed_handle();
    return handle.call(self);
}

// aten::_nested_get_min_seqlen(Tensor self) -> Tensor
at::Tensor _nested_get_min_seqlen::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_get_min_seqlen_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
// Looks up the dispatcher schema for triplet_margin_loss and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<triplet_margin_loss::schema> create_triplet_margin_loss_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(triplet_margin_loss::name, triplet_margin_loss::overload_name).typed<triplet_margin_loss::schema>();
}

// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
at::Tensor triplet_margin_loss::call(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_triplet_margin_loss_typed_handle();
    return handle.call(anchor, positive, negative, margin, p, eps, swap, reduction);
}

// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
at::Tensor triplet_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
    static auto handle = create_triplet_margin_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, anchor, positive, negative, margin, p, eps, swap, reduction);
}

// aten::trunc(Tensor self) -> Tensor
// Looks up the dispatcher schema for trunc and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<trunc::schema> create_trunc_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trunc::name, trunc::overload_name).typed<trunc::schema>();
}

// aten::trunc(Tensor self) -> Tensor
at::Tensor trunc::call(const at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_trunc_typed_handle();
    return handle.call(self);
}

// aten::trunc(Tensor self) -> Tensor
at::Tensor trunc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_trunc_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
// Looks up the dispatcher schema for trunc_ and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<trunc_::schema> create_trunc__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trunc_::name, trunc_::overload_name).typed<trunc_::schema>();
}

// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & trunc_::call(at::Tensor & self) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_trunc__typed_handle();
    return handle.call(self);
}

// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & trunc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto handle = create_trunc__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for trunc.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<trunc_out::schema> create_trunc_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trunc_out::name, trunc_out::overload_name).typed<trunc_out::schema>();
}

// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trunc_out::call(const at::Tensor & self, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_trunc_out_typed_handle();
    return handle.call(self, out);
}

// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trunc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_trunc_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::var(Tensor self, bool unbiased=True) -> Tensor
// Looks up the dispatcher schema for var and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var::schema> create_var_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var::name, var::overload_name).typed<var::schema>();
}

// aten::var(Tensor self, bool unbiased=True) -> Tensor
at::Tensor var::call(const at::Tensor & self, bool unbiased) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_typed_handle();
    return handle.call(self, unbiased);
}

// aten::var(Tensor self, bool unbiased=True) -> Tensor
at::Tensor var::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    static auto handle = create_var_typed_handle();
    return handle.redispatch(dispatchKeySet, self, unbiased);
}

// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
// Looks up the dispatcher schema for var.dim and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_dim::schema> create_var_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_dim::name, var_dim::overload_name).typed<var_dim::schema>();
}

// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_dim_typed_handle();
    return handle.call(self, dim, unbiased, keepdim);
}

// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto handle = create_var_dim_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
// Looks up the dispatcher schema for var.correction and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction::schema> create_var_correction_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction::name, var_correction::overload_name).typed<var_correction::schema>();
}

// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_correction_typed_handle();
    return handle.call(self, dim, correction, keepdim);
}

// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    static auto handle = create_var_correction_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for var.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_out::schema> create_var_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_out::name, var_out::overload_name).typed<var_out::schema>();
}

// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_out_typed_handle();
    return handle.call(self, dim, unbiased, keepdim, out);
}

// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    static auto handle = create_var_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
}

// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for var.correction_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction_out::schema> create_var_correction_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction_out::name, var_correction_out::overload_name).typed<var_correction_out::schema>();
}

// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_correction_out_typed_handle();
    return handle.call(self, dim, correction, keepdim, out);
}

// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    static auto handle = create_var_correction_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
}

// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
// Looks up the dispatcher schema for var.names_dim and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_names_dim::schema> create_var_names_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_names_dim::name, var_names_dim::overload_name).typed<var_names_dim::schema>();
}

// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_names_dim_typed_handle();
    return handle.call(self, dim, unbiased, keepdim);
}

// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    static auto handle = create_var_names_dim_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for var.names_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_names_out::schema> create_var_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_names_out::name, var_names_out::overload_name).typed<var_names_out::schema>();
}

// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_names_out::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_names_out_typed_handle();
    return handle.call(self, dim, unbiased, keepdim, out);
}

// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    static auto handle = create_var_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
}

// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
// Looks up the dispatcher schema for var.correction_names and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction_names::schema> create_var_correction_names_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction_names::name, var_correction_names::overload_name).typed<var_correction_names::schema>();
}

// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction_names::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_correction_names_typed_handle();
    return handle.call(self, dim, correction, keepdim);
}

// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    static auto handle = create_var_correction_names_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher schema for var.correction_names_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction_names_out::schema> create_var_correction_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction_names_out::name, var_correction_names_out::overload_name).typed<var_correction_names_out::schema>();
}

// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_names_out::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_correction_names_out_typed_handle();
    return handle.call(self, dim, correction, keepdim, out);
}

// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    static auto handle = create_var_correction_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
}

// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
// Looks up the dispatcher schema for var_mean and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean::schema> create_var_mean_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean::name, var_mean::overload_name).typed<var_mean::schema>();
}

// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean::call(const at::Tensor & self, bool unbiased) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_mean_typed_handle();
    return handle.call(self, unbiased);
}

// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    static auto handle = create_var_mean_typed_handle();
    return handle.redispatch(dispatchKeySet, self, unbiased);
}

// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
// Looks up the dispatcher schema for var_mean.dim and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_dim::schema> create_var_mean_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_dim::name, var_mean_dim::overload_name).typed<var_mean_dim::schema>();
}

// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_mean_dim_typed_handle();
    return handle.call(self, dim, unbiased, keepdim);
}

// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto handle = create_var_mean_dim_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
// Looks up the dispatcher schema for var_mean.correction and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_correction::schema> create_var_mean_correction_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_correction::name, var_mean_correction::overload_name).typed<var_mean_correction::schema>();
}

// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_mean_correction_typed_handle();
    return handle.call(self, dim, correction, keepdim);
}

// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    static auto handle = create_var_mean_correction_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
// Looks up the dispatcher schema for var_mean.names_dim and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_names_dim::schema> create_var_mean_names_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_names_dim::name, var_mean_names_dim::overload_name).typed<var_mean_names_dim::schema>();
}

// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_mean_names_dim_typed_handle();
    return handle.call(self, dim, unbiased, keepdim);
}

// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    static auto handle = create_var_mean_names_dim_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
// Looks up the dispatcher schema for var_mean.correction_names and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_correction_names::schema> create_var_mean_correction_names_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_correction_names::name, var_mean_correction_names::overload_name).typed<var_mean_correction_names::schema>();
}

// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // Resolve the dispatcher handle once; cached for the process lifetime.
    static auto handle = create_var_mean_correction_names_typed_handle();
    return handle.call(self, dim, correction, keepdim);
}

// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    static auto handle = create_var_mean_correction_names_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_except_dim::schema> create_norm_except_dim_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_except_dim::name, norm_except_dim::overload_name).typed<norm_except_dim::schema>();
}

// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
at::Tensor norm_except_dim::call(const at::Tensor & v, int64_t pow, int64_t dim) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_except_dim_typed_handle();
    return dispatch_handle.call(v, pow, dim);
}

// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
at::Tensor norm_except_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, int64_t pow, int64_t dim) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_except_dim_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, v, pow, dim);
}

// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma_grad::schema> create__standard_gamma_grad_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_standard_gamma_grad::name, _standard_gamma_grad::overload_name).typed<_standard_gamma_grad::schema>();
}

// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
at::Tensor _standard_gamma_grad::call(const at::Tensor & self, const at::Tensor & output) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__standard_gamma_grad_typed_handle();
    return dispatch_handle.call(self, output);
}

// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
at::Tensor _standard_gamma_grad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__standard_gamma_grad_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, output);
}

// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_norm::schema> create_native_norm_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_norm::name, native_norm::overload_name).typed<native_norm::schema>();
}

// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
at::Tensor native_norm::call(const at::Tensor & self, const at::Scalar & p) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_native_norm_typed_handle();
    return dispatch_handle.call(self, p);
}

// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
at::Tensor native_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_native_norm_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p);
}

// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_norm_ScalarOpt_dim_dtype::schema> create_native_norm_ScalarOpt_dim_dtype_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_norm_ScalarOpt_dim_dtype::name, native_norm_ScalarOpt_dim_dtype::overload_name).typed<native_norm_ScalarOpt_dim_dtype::schema>();
}

// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
at::Tensor native_norm_ScalarOpt_dim_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_native_norm_ScalarOpt_dim_dtype_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, dtype);
}

// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
at::Tensor native_norm_ScalarOpt_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_native_norm_ScalarOpt_dim_dtype_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
}

// aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_with_update::schema> create__batch_norm_with_update_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_with_update::name, _batch_norm_with_update::overload_name).typed<_batch_norm_with_update::schema>();
}

// aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__batch_norm_with_update_typed_handle();
    return dispatch_handle.call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__batch_norm_with_update_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_with_update_out::schema> create__batch_norm_with_update_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_with_update_out::name, _batch_norm_with_update_out::overload_name).typed<_batch_norm_with_update_out::schema>();
}

// aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__batch_norm_with_update_out_typed_handle();
    return dispatch_handle.call(input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}

// aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__batch_norm_with_update_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}

// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_backward::schema> create__sparse_sum_backward_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum_backward::name, _sparse_sum_backward::overload_name).typed<_sparse_sum_backward::schema>();
}

// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
at::Tensor _sparse_sum_backward::call(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__sparse_sum_backward_typed_handle();
    return dispatch_handle.call(grad, self, dim);
}

// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
at::Tensor _sparse_sum_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__sparse_sum_backward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad, self, dim);
}

// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_sum_dim_dtype::schema> create__sparse_csr_sum_dim_dtype_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_csr_sum_dim_dtype::name, _sparse_csr_sum_dim_dtype::overload_name).typed<_sparse_csr_sum_dim_dtype::schema>();
}

// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_csr_sum_dim_dtype::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__sparse_csr_sum_dim_dtype_typed_handle();
    return dispatch_handle.call(self, dim, keepdim, dtype);
}

// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_csr_sum_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__sparse_csr_sum_dim_dtype_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_int::schema> create__sparse_softmax_int_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax_int::name, _sparse_softmax_int::overload_name).typed<_sparse_softmax_int::schema>();
}

// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_int::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__sparse_softmax_int_typed_handle();
    return dispatch_handle.call(self, dim, dtype);
}

// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__sparse_softmax_int_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_Dimname::schema> create__sparse_softmax_Dimname_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax_Dimname::name, _sparse_softmax_Dimname::overload_name).typed<_sparse_softmax_Dimname::schema>();
}

// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__sparse_softmax_Dimname_typed_handle();
    return dispatch_handle.call(self, dim, dtype);
}

// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__sparse_softmax_Dimname_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax::schema> create__sparse_softmax_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax::name, _sparse_softmax::overload_name).typed<_sparse_softmax::schema>();
}

// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _sparse_softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__sparse_softmax_typed_handle();
    return dispatch_handle.call(self, dim, half_to_float);
}

// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _sparse_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__sparse_softmax_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, half_to_float);
}

// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dtype::schema> create_norm_ScalarOpt_dtype_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dtype::name, norm_ScalarOpt_dtype::overload_name).typed<norm_ScalarOpt_dtype::schema>();
}

// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
at::Tensor norm_ScalarOpt_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_ScalarOpt_dtype_typed_handle();
    return dispatch_handle.call(self, p, dtype);
}

// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
at::Tensor norm_ScalarOpt_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_ScalarOpt_dtype_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dtype);
}

// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_Scalar::schema> create_norm_Scalar_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_Scalar::name, norm_Scalar::overload_name).typed<norm_Scalar::schema>();
}

// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
at::Tensor norm_Scalar::call(const at::Tensor & self, const at::Scalar & p) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_Scalar_typed_handle();
    return dispatch_handle.call(self, p);
}

// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
at::Tensor norm_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_Scalar_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p);
}

// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dim_dtype::schema> create_norm_ScalarOpt_dim_dtype_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dim_dtype::name, norm_ScalarOpt_dim_dtype::overload_name).typed<norm_ScalarOpt_dim_dtype::schema>();
}

// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
at::Tensor norm_ScalarOpt_dim_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_ScalarOpt_dim_dtype_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, dtype);
}

// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
at::Tensor norm_ScalarOpt_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_ScalarOpt_dim_dtype_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
}

// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dim::schema> create_norm_ScalarOpt_dim_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dim::name, norm_ScalarOpt_dim::overload_name).typed<norm_ScalarOpt_dim::schema>();
}

// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor norm_ScalarOpt_dim::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_ScalarOpt_dim_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim);
}

// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor norm_ScalarOpt_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_ScalarOpt_dim_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim);
}

// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<norm_dtype_out::schema> create_norm_dtype_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_dtype_out::name, norm_dtype_out::overload_name).typed<norm_dtype_out::schema>();
}

// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_dtype_out_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, dtype, out);
}

// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_dtype_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
}

// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<norm_out::schema> create_norm_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_out::name, norm_out::overload_name).typed<norm_out::schema>();
}

// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_out_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, out);
}

// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, out);
}

// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_ScalarOpt_dim_dtype::schema> create_norm_names_ScalarOpt_dim_dtype_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_ScalarOpt_dim_dtype::name, norm_names_ScalarOpt_dim_dtype::overload_name).typed<norm_names_ScalarOpt_dim_dtype::schema>();
}

// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
at::Tensor norm_names_ScalarOpt_dim_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_names_ScalarOpt_dim_dtype_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, dtype);
}

// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
at::Tensor norm_names_ScalarOpt_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_names_ScalarOpt_dim_dtype_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
}

// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_ScalarOpt_dim::schema> create_norm_names_ScalarOpt_dim_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_ScalarOpt_dim::name, norm_names_ScalarOpt_dim::overload_name).typed<norm_names_ScalarOpt_dim::schema>();
}

// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
at::Tensor norm_names_ScalarOpt_dim::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_names_ScalarOpt_dim_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim);
}

// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
at::Tensor norm_names_ScalarOpt_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_names_ScalarOpt_dim_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim);
}

// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_dtype_out::schema> create_norm_names_dtype_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_dtype_out::name, norm_names_dtype_out::overload_name).typed<norm_names_dtype_out::schema>();
}

// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_names_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_names_dtype_out_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, dtype, out);
}

// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_names_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_names_dtype_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
}

// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_out::schema> create_norm_names_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_out::name, norm_names_out::overload_name).typed<norm_names_out::schema>();
}

// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_names_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_norm_names_out_typed_handle();
    return dispatch_handle.call(self, p, dim, keepdim, out);
}

// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_norm_names_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, out);
}

// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm::schema> create_nuclear_norm_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm::name, nuclear_norm::overload_name).typed<nuclear_norm::schema>();
}

// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
at::Tensor nuclear_norm::call(const at::Tensor & self, bool keepdim) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_nuclear_norm_typed_handle();
    return dispatch_handle.call(self, keepdim);
}

// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
at::Tensor nuclear_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_nuclear_norm_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, keepdim);
}

// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm_out::schema> create_nuclear_norm_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm_out::name, nuclear_norm_out::overload_name).typed<nuclear_norm_out::schema>();
}

// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nuclear_norm_out::call(const at::Tensor & self, bool keepdim, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_nuclear_norm_out_typed_handle();
    return dispatch_handle.call(self, keepdim, out);
}

// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nuclear_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_nuclear_norm_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, keepdim, out);
}

// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm_dim::schema> create_nuclear_norm_dim_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm_dim::name, nuclear_norm_dim::overload_name).typed<nuclear_norm_dim::schema>();
}

// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
at::Tensor nuclear_norm_dim::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_nuclear_norm_dim_typed_handle();
    return dispatch_handle.call(self, dim, keepdim);
}

// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
at::Tensor nuclear_norm_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_nuclear_norm_dim_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm_dim_out::schema> create_nuclear_norm_dim_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm_dim_out::name, nuclear_norm_dim_out::overload_name).typed<nuclear_norm_dim_out::schema>();
}

// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nuclear_norm_dim_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create_nuclear_norm_dim_out_typed_handle();
    return dispatch_handle.call(self, dim, keepdim, out);
}

// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nuclear_norm_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create_nuclear_norm_dim_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_compressed_tensor_with_dims::schema> create__sparse_compressed_tensor_with_dims_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind its static signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_compressed_tensor_with_dims::name, _sparse_compressed_tensor_with_dims::overload_name).typed<_sparse_compressed_tensor_with_dims::schema>();
}

// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _sparse_compressed_tensor_with_dims::call(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Typed handle is created lazily on first call and cached for the process lifetime.
    static auto dispatch_handle = create__sparse_compressed_tensor_with_dims_typed_handle();
    return dispatch_handle.call(nnz, dense_dim, size, blocksize, index_dtype, dtype, layout, device, pin_memory);
}

// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _sparse_compressed_tensor_with_dims::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto dispatch_handle = create__sparse_compressed_tensor_with_dims_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, nnz, dense_dim, size, blocksize, index_dtype, dtype, layout, device, pin_memory);
}

// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csc_tensor_unsafe::schema> create__sparse_csc_tensor_unsafe_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_csc_tensor_unsafe::name, _sparse_csc_tensor_unsafe::overload_name).typed<_sparse_csc_tensor_unsafe::schema>();
}

// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_csc_tensor_unsafe::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__sparse_csc_tensor_unsafe_typed_handle();
    return handle.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _sparse_csc_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create__sparse_csc_tensor_unsafe_typed_handle();
    return handle.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_coo_tensor_args::schema> create__validate_sparse_coo_tensor_args_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_validate_sparse_coo_tensor_args::name, _validate_sparse_coo_tensor_args::overload_name).typed<_validate_sparse_coo_tensor_args::schema>();
}

// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
void _validate_sparse_coo_tensor_args::call(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> is_coalesced) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__validate_sparse_coo_tensor_args_typed_handle();
    return handle.call(indices, values, size, is_coalesced);
}

// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
void _validate_sparse_coo_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> is_coalesced) {
    static auto handle = create__validate_sparse_coo_tensor_args_typed_handle();
    return handle.redispatch(dispatchKeySet, indices, values, size, is_coalesced);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims_and_tensors::schema> create__sparse_coo_tensor_with_dims_and_tensors_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_coo_tensor_with_dims_and_tensors::name, _sparse_coo_tensor_with_dims_and_tensors::overload_name).typed<_sparse_coo_tensor_with_dims_and_tensors::schema>();
}

// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
at::Tensor _sparse_coo_tensor_with_dims_and_tensors::call(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__sparse_coo_tensor_with_dims_and_tensors_typed_handle();
    return handle.call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
at::Tensor _sparse_coo_tensor_with_dims_and_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    static auto handle = create__sparse_coo_tensor_with_dims_and_tensors_typed_handle();
    return handle.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mask_projection::schema> create__sparse_mask_projection_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_mask_projection::name, _sparse_mask_projection::overload_name).typed<_sparse_mask_projection::schema>();
}

// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
at::Tensor _sparse_mask_projection::call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__sparse_mask_projection_typed_handle();
    return handle.call(self, mask, accumulate_matches);
}

// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
at::Tensor _sparse_mask_projection::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
    static auto handle = create__sparse_mask_projection_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, accumulate_matches);
}

// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_to_dense::schema> create__to_dense_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_to_dense::name, _to_dense::overload_name).typed<_to_dense::schema>();
}

// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
at::Tensor _to_dense::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__to_dense_typed_handle();
    return handle.call(self, dtype, masked_grad);
}

// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
at::Tensor _to_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    static auto handle = create__to_dense_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, masked_grad);
}

// aten::is_coalesced(Tensor self) -> bool
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<is_coalesced::schema> create_is_coalesced_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(is_coalesced::name, is_coalesced::overload_name).typed<is_coalesced::schema>();
}

// aten::is_coalesced(Tensor self) -> bool
bool is_coalesced::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_is_coalesced_typed_handle();
    return handle.call(self);
}

// aten::is_coalesced(Tensor self) -> bool
bool is_coalesced::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_is_coalesced_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesced_::schema> create__coalesced__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_coalesced_::name, _coalesced_::overload_name).typed<_coalesced_::schema>();
}

// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
at::Tensor & _coalesced_::call(at::Tensor & self, bool coalesced) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__coalesced__typed_handle();
    return handle.call(self, coalesced);
}

// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
at::Tensor & _coalesced_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced) {
    static auto handle = create__coalesced__typed_handle();
    return handle.redispatch(dispatchKeySet, self, coalesced);
}

// aten::indices(Tensor(a) self) -> Tensor(a)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<indices::schema> create_indices_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(indices::name, indices::overload_name).typed<indices::schema>();
}

// aten::indices(Tensor(a) self) -> Tensor(a)
at::Tensor indices::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_indices_typed_handle();
    return handle.call(self);
}

// aten::indices(Tensor(a) self) -> Tensor(a)
at::Tensor indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_indices_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::col_indices(Tensor(a) self) -> Tensor(a)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<col_indices::schema> create_col_indices_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(col_indices::name, col_indices::overload_name).typed<col_indices::schema>();
}

// aten::col_indices(Tensor(a) self) -> Tensor(a)
at::Tensor col_indices::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_col_indices_typed_handle();
    return handle.call(self);
}

// aten::col_indices(Tensor(a) self) -> Tensor(a)
at::Tensor col_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_col_indices_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<hspmm_out::schema> create_hspmm_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hspmm_out::name, hspmm_out::overload_name).typed<hspmm_out::schema>();
}

// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hspmm_out::call(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_hspmm_out_typed_handle();
    return handle.call(mat1, mat2, out);
}

// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hspmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
    static auto handle = create_hspmm_out_typed_handle();
    return handle.redispatch(dispatchKeySet, mat1, mat2, out);
}

// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<hspmm::schema> create_hspmm_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hspmm::name, hspmm::overload_name).typed<hspmm::schema>();
}

// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
at::Tensor hspmm::call(const at::Tensor & mat1, const at::Tensor & mat2) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_hspmm_typed_handle();
    return handle.call(mat1, mat2);
}

// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
at::Tensor hspmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2) {
    static auto handle = create_hspmm_typed_handle();
    return handle.redispatch(dispatchKeySet, mat1, mat2);
}

// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_bsc::schema> create_to_sparse_bsc_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(to_sparse_bsc::name, to_sparse_bsc::overload_name).typed<to_sparse_bsc::schema>();
}

// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_bsc::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_to_sparse_bsc_typed_handle();
    return handle.call(self, blocksize, dense_dim);
}

// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
at::Tensor to_sparse_bsc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    static auto handle = create_to_sparse_bsc_typed_handle();
    return handle.redispatch(dispatchKeySet, self, blocksize, dense_dim);
}

// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_semi_structured::schema> create__to_sparse_semi_structured_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_to_sparse_semi_structured::name, _to_sparse_semi_structured::overload_name).typed<_to_sparse_semi_structured::schema>();
}

// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured::call(const at::Tensor & dense) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__to_sparse_semi_structured_typed_handle();
    return handle.call(dense);
}

// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dense) {
    static auto handle = create__to_sparse_semi_structured_typed_handle();
    return handle.redispatch(dispatchKeySet, dense);
}

// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_dynamic::schema> create_quantize_per_tensor_dynamic_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_dynamic::name, quantize_per_tensor_dynamic::overload_name).typed<quantize_per_tensor_dynamic::schema>();
}

// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
at::Tensor quantize_per_tensor_dynamic::call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_quantize_per_tensor_dynamic_typed_handle();
    return handle.call(self, dtype, reduce_range);
}

// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
at::Tensor quantize_per_tensor_dynamic::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
    static auto handle = create_quantize_per_tensor_dynamic_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, reduce_range);
}

// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor::schema> create_quantize_per_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor::name, quantize_per_tensor::overload_name).typed<quantize_per_tensor::schema>();
}

// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
at::Tensor quantize_per_tensor::call(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_quantize_per_tensor_typed_handle();
    return handle.call(self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
at::Tensor quantize_per_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
    static auto handle = create_quantize_per_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensor_qparams::schema> create_quantize_per_tensor_tensor_qparams_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_tensor_qparams::name, quantize_per_tensor_tensor_qparams::overload_name).typed<quantize_per_tensor_tensor_qparams::schema>();
}

// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
at::Tensor quantize_per_tensor_tensor_qparams::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_quantize_per_tensor_tensor_qparams_typed_handle();
    return handle.call(self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
at::Tensor quantize_per_tensor_tensor_qparams::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
    static auto handle = create_quantize_per_tensor_tensor_qparams_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensors::schema> create_quantize_per_tensor_tensors_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_tensors::name, quantize_per_tensor_tensors::overload_name).typed<quantize_per_tensor_tensors::schema>();
}

// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
::std::vector<at::Tensor> quantize_per_tensor_tensors::call(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_quantize_per_tensor_tensors_typed_handle();
    return handle.call(tensors, scales, zero_points, dtype);
}

// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
::std::vector<at::Tensor> quantize_per_tensor_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
    static auto handle = create_quantize_per_tensor_tensors_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, scales, zero_points, dtype);
}

// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_cachemask::schema> create_fake_quantize_per_tensor_affine_cachemask_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fake_quantize_per_tensor_affine_cachemask::name, fake_quantize_per_tensor_affine_cachemask::overload_name).typed<fake_quantize_per_tensor_affine_cachemask::schema>();
}

// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask::call(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_fake_quantize_per_tensor_affine_cachemask_typed_handle();
    return handle.call(self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    static auto handle = create_fake_quantize_per_tensor_affine_cachemask_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::schema> create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::name, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams::overload_name).typed<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::schema>();
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_typed_handle();
    return handle.call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
    static auto handle = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}

// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_tensor_affine::schema> create__fake_quantize_learnable_per_tensor_affine_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fake_quantize_learnable_per_tensor_affine::name, _fake_quantize_learnable_per_tensor_affine::overload_name).typed<_fake_quantize_learnable_per_tensor_affine::schema>();
}

// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_tensor_affine::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__fake_quantize_learnable_per_tensor_affine_typed_handle();
    return handle.call(self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_tensor_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    static auto handle = create__fake_quantize_learnable_per_tensor_affine_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<choose_qparams_optimized::schema> create_choose_qparams_optimized_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(choose_qparams_optimized::name, choose_qparams_optimized::overload_name).typed<choose_qparams_optimized::schema>();
}

// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized::call(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_choose_qparams_optimized_typed_handle();
    return handle.call(input, numel, n_bins, ratio, bit_width);
}

// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
    static auto handle = create_choose_qparams_optimized_typed_handle();
    return handle.redispatch(dispatchKeySet, input, numel, n_bins, ratio, bit_width);
}

// aten::cartesian_prod(Tensor[] tensors) -> Tensor
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<cartesian_prod::schema> create_cartesian_prod_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cartesian_prod::name, cartesian_prod::overload_name).typed<cartesian_prod::schema>();
}

// aten::cartesian_prod(Tensor[] tensors) -> Tensor
at::Tensor cartesian_prod::call(at::TensorList tensors) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_cartesian_prod_typed_handle();
    return handle.call(tensors);
}

// aten::cartesian_prod(Tensor[] tensors) -> Tensor
at::Tensor cartesian_prod::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static auto handle = create_cartesian_prod_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<promote_types::schema> create_promote_types_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(promote_types::name, promote_types::overload_name).typed<promote_types::schema>();
}

// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
at::ScalarType promote_types::call(at::ScalarType type1, at::ScalarType type2) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_promote_types_typed_handle();
    return handle.call(type1, type2);
}

// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
at::ScalarType promote_types::redispatch(c10::DispatchKeySet dispatchKeySet, at::ScalarType type1, at::ScalarType type2) {
    static auto handle = create_promote_types_typed_handle();
    return handle.redispatch(dispatchKeySet, type1, type2);
}

// aten::_local_scalar_dense(Tensor self) -> Scalar
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_local_scalar_dense::schema> create__local_scalar_dense_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_local_scalar_dense::name, _local_scalar_dense::overload_name).typed<_local_scalar_dense::schema>();
}

// aten::_local_scalar_dense(Tensor self) -> Scalar
at::Scalar _local_scalar_dense::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__local_scalar_dense_typed_handle();
    return handle.call(self);
}

// aten::_local_scalar_dense(Tensor self) -> Scalar
at::Scalar _local_scalar_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__local_scalar_dense_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell_backward::schema> create__thnn_fused_gru_cell_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_gru_cell_backward::name, _thnn_fused_gru_cell_backward::overload_name).typed<_thnn_fused_gru_cell_backward::schema>();
}

// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward::call(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create__thnn_fused_gru_cell_backward_typed_handle();
    return handle.call(grad_hy, workspace, has_bias);
}

// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
    static auto handle = create__thnn_fused_gru_cell_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_hy, workspace, has_bias);
}

// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
// Resolves and returns the typed dispatcher handle for this operator schema.
static C10_NOINLINE c10::TypedOperatorHandle<rnn_relu_input::schema> create_rnn_relu_input_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rnn_relu_input::name, rnn_relu_input::overload_name).typed<rnn_relu_input::schema>();
}

// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_input::call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Handle is created once (thread-safe static init) and cached for later calls.
    static auto handle = create_rnn_relu_input_typed_handle();
    return handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    static auto handle = create_rnn_relu_input_typed_handle();
    return handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_relu_data::schema> create_rnn_relu_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rnn_relu_data::name, rnn_relu_data::overload_name)
      .typed<rnn_relu_data::schema>();
}

// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    
    static auto op = create_rnn_relu_data_typed_handle();
    return op.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    
    static auto op = create_rnn_relu_data_typed_handle();
    return op.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gru_cell::schema> create_gru_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gru_cell::name, gru_cell::overload_name)
      .typed<gru_cell::schema>();
}

// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor gru_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    
    static auto op = create_gru_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor gru_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    
    static auto op = create_gru_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<quantized_lstm_cell::schema> create_quantized_lstm_cell_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_lstm_cell::name, quantized_lstm_cell::overload_name)
      .typed<quantized_lstm_cell::schema>();
}

// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell::call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    
    static auto op = create_quantized_lstm_cell_typed_handle();
    return op.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    
    static auto op = create_quantized_lstm_cell_typed_handle();
    return op.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
// NOTE(review): torchgen-generated dispatcher stubs for the aten::set_
// overload family — do not edit by hand. Each overload gets a schema-lookup
// helper (cached via function-local static in call/redispatch), a ::call()
// entry that runs full dispatch, and a ::redispatch() that continues dispatch
// with the caller-supplied DispatchKeySet. All overloads mutate `self`
// in-place and return it by reference, per the (a!) annotation in the schema.
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Storage::schema> create_set__source_Storage_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(set__source_Storage::name, set__source_Storage::overload_name)
      .typed<set__source_Storage::schema>();
}

// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
at::Tensor & set__source_Storage::call(at::Tensor & self, at::Storage source) {
    
    static auto op = create_set__source_Storage_typed_handle();
    return op.call(self, source);
}

// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
at::Tensor & set__source_Storage::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source) {
    
    static auto op = create_set__source_Storage_typed_handle();
    return op.redispatch(dispatchKeySet, self, source);
}

// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Storage_storage_offset::schema> create_set__source_Storage_storage_offset_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(set__source_Storage_storage_offset::name, set__source_Storage_storage_offset::overload_name)
      .typed<set__source_Storage_storage_offset::schema>();
}

// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Storage_storage_offset::call(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    
    static auto op = create_set__source_Storage_storage_offset_typed_handle();
    return op.call(self, source, storage_offset, size, stride);
}

// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Storage_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    
    static auto op = create_set__source_Storage_storage_offset_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
}

// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Tensor_storage_offset::schema> create_set__source_Tensor_storage_offset_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(set__source_Tensor_storage_offset::name, set__source_Tensor_storage_offset::overload_name)
      .typed<set__source_Tensor_storage_offset::schema>();
}

// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Tensor_storage_offset::call(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    
    static auto op = create_set__source_Tensor_storage_offset_typed_handle();
    return op.call(self, source, storage_offset, size, stride);
}

// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Tensor_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    
    static auto op = create_set__source_Tensor_storage_offset_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
}

// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Tensor::schema> create_set__source_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(set__source_Tensor::name, set__source_Tensor::overload_name)
      .typed<set__source_Tensor::schema>();
}

// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
at::Tensor & set__source_Tensor::call(at::Tensor & self, const at::Tensor & source) {
    
    static auto op = create_set__source_Tensor_typed_handle();
    return op.call(self, source);
}

// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
at::Tensor & set__source_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source) {
    
    static auto op = create_set__source_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, source);
}

// aten::set_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set_::schema> create_set__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(set_::name, set_::overload_name)
      .typed<set_::schema>();
}

// aten::set_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & set_::call(at::Tensor & self) {
    
    static auto op = create_set__typed_handle();
    return op.call(self);
}

// aten::set_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & set_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_set__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
// NOTE(review): torchgen-generated dispatcher stubs for
// aten::masked_scatter_backward, aten::put_ (in-place) and aten::put
// (functional) — do not edit by hand. Each trio consists of a schema-lookup
// helper returning a TypedOperatorHandle, a ::call() performing full dispatch,
// and a ::redispatch() that continues dispatch with an explicit key set; the
// handle is cached in a thread-safe function-local static.
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter_backward::schema> create_masked_scatter_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_scatter_backward::name, masked_scatter_backward::overload_name)
      .typed<masked_scatter_backward::schema>();
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
at::Tensor masked_scatter_backward::call(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
    
    static auto op = create_masked_scatter_backward_typed_handle();
    return op.call(grad_output, mask, sizes);
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
at::Tensor masked_scatter_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
    
    static auto op = create_masked_scatter_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, mask, sizes);
}

// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<put_::schema> create_put__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(put_::name, put_::overload_name)
      .typed<put_::schema>();
}

// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
at::Tensor & put_::call(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
    
    static auto op = create_put__typed_handle();
    return op.call(self, index, source, accumulate);
}

// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
at::Tensor & put_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
    
    static auto op = create_put__typed_handle();
    return op.redispatch(dispatchKeySet, self, index, source, accumulate);
}

// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<put::schema> create_put_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(put::name, put::overload_name)
      .typed<put::schema>();
}

// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
at::Tensor put::call(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
    
    static auto op = create_put_typed_handle();
    return op.call(self, index, source, accumulate);
}

// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
at::Tensor put::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
    
    static auto op = create_put_typed_handle();
    return op.redispatch(dispatchKeySet, self, index, source, accumulate);
}

// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
// NOTE(review): torchgen-generated dispatcher stubs for the aten::scatter
// src/value overloads (functional, in-place `scatter_`, and `out` variants) —
// do not edit by hand. Each overload gets a schema-lookup helper returning a
// TypedOperatorHandle, a ::call() performing full dispatch, and a
// ::redispatch() that continues dispatch with the caller's DispatchKeySet;
// the handle is cached in a thread-safe function-local static.
static C10_NOINLINE c10::TypedOperatorHandle<scatter_src::schema> create_scatter_src_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_src::name, scatter_src::overload_name)
      .typed<scatter_src::schema>();
}

// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_src::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_src_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_src::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_src_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__src::schema> create_scatter__src_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter__src::name, scatter__src::overload_name)
      .typed<scatter__src::schema>();
}

// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter__src::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter__src_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter__src::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter__src_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_src_out::schema> create_scatter_src_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_src_out::name, scatter_src_out::overload_name)
      .typed<scatter_src_out::schema>();
}

// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_src_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
    
    static auto op = create_scatter_src_out_typed_handle();
    return op.call(self, dim, index, src, out);
}

// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_src_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
    
    static auto op = create_scatter_src_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, out);
}

// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value::schema> create_scatter_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value::name, scatter_value::overload_name)
      .typed<scatter_value::schema>();
}

// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_value::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
    
    static auto op = create_scatter_value_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
    
    static auto op = create_scatter_value_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__value::schema> create_scatter__value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter__value::name, scatter__value::overload_name)
      .typed<scatter__value::schema>();
}

// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & scatter__value::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
    
    static auto op = create_scatter__value_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & scatter__value::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
    
    static auto op = create_scatter__value_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value_out::schema> create_scatter_value_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value_out::name, scatter_value_out::overload_name)
      .typed<scatter_value_out::schema>();
}

// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_scatter_value_out_typed_handle();
    return op.call(self, dim, index, value, out);
}

// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_scatter_value_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, out);
}

// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
// NOTE(review): torchgen-generated dispatcher stubs for the aten::scatter
// reduce/value_reduce overloads (functional, in-place `scatter_`, and `out`
// variants, each taking a string `reduce` mode) — do not edit by hand.
// Pattern per overload: schema-lookup helper returning a TypedOperatorHandle,
// ::call() for full dispatch, ::redispatch() for continuing dispatch with an
// explicit DispatchKeySet; handle cached in a thread-safe function-local
// static.
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce::schema> create_scatter_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce::name, scatter_reduce::overload_name)
      .typed<scatter_reduce::schema>();
}

// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
at::Tensor scatter_reduce::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter_reduce_typed_handle();
    return op.call(self, dim, index, src, reduce);
}

// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
at::Tensor scatter_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce);
}

// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__reduce::schema> create_scatter__reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter__reduce::name, scatter__reduce::overload_name)
      .typed<scatter__reduce::schema>();
}

// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__reduce::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter__reduce_typed_handle();
    return op.call(self, dim, index, src, reduce);
}

// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__reduce::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter__reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce);
}

// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce_out::schema> create_scatter_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce_out::name, scatter_reduce_out::overload_name)
      .typed<scatter_reduce_out::schema>();
}

// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_reduce_out_typed_handle();
    return op.call(self, dim, index, src, reduce, out);
}

// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce, out);
}

// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value_reduce::schema> create_scatter_value_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value_reduce::name, scatter_value_reduce::overload_name)
      .typed<scatter_value_reduce::schema>();
}

// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
at::Tensor scatter_value_reduce::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter_value_reduce_typed_handle();
    return op.call(self, dim, index, value, reduce);
}

// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
at::Tensor scatter_value_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter_value_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, reduce);
}

// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__value_reduce::schema> create_scatter__value_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter__value_reduce::name, scatter__value_reduce::overload_name)
      .typed<scatter__value_reduce::schema>();
}

// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__value_reduce::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter__value_reduce_typed_handle();
    return op.call(self, dim, index, value, reduce);
}

// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__value_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter__value_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, reduce);
}

// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value_reduce_out::schema> create_scatter_value_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value_reduce_out::name, scatter_value_reduce_out::overload_name)
      .typed<scatter_value_reduce_out::schema>();
}

// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_reduce_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_value_reduce_out_typed_handle();
    return op.call(self, dim, index, value, reduce, out);
}

// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_value_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, reduce, out);
}

// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_dimname_src::schema> create_scatter_dimname_src_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_dimname_src::name, scatter_dimname_src::overload_name)
      .typed<scatter_dimname_src::schema>();
}

// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_dimname_src::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_dimname_src_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_dimname_src::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_dimname_src_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_dimname_value::schema> create_scatter_dimname_value_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(scatter_dimname_value::name, scatter_dimname_value::overload_name).typed<scatter_dimname_value::schema>();
}

// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_dimname_value::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_scatter_dimname_value_typed_handle();
    return handle.call(self, dim, index, value);
}

// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_dimname_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_scatter_dimname_value_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce_two::schema> create_scatter_reduce_two_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(scatter_reduce_two::name, scatter_reduce_two::overload_name).typed<scatter_reduce_two::schema>();
}

// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
at::Tensor scatter_reduce_two::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_scatter_reduce_two_typed_handle();
    return handle.call(self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
at::Tensor scatter_reduce_two::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_scatter_reduce_two_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce__two::schema> create_scatter_reduce__two_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(scatter_reduce__two::name, scatter_reduce__two::overload_name).typed<scatter_reduce__two::schema>();
}

// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & scatter_reduce__two::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_scatter_reduce__two_typed_handle();
    return handle.call(self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & scatter_reduce__two::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_scatter_reduce__two_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce_two_out::schema> create_scatter_reduce_two_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(scatter_reduce_two_out::name, scatter_reduce_two_out::overload_name).typed<scatter_reduce_two_out::schema>();
}

// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_two_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_scatter_reduce_two_out_typed_handle();
    return handle.call(self, dim, index, src, reduce, include_self, out);
}

// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_two_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_scatter_reduce_two_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self, out);
}

// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__and___Scalar::schema> create___and___Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__and___Scalar::name, __and___Scalar::overload_name).typed<__and___Scalar::schema>();
}

// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __and___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create___and___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __and___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create___and___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__and___Tensor::schema> create___and___Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__and___Tensor::name, __and___Tensor::overload_name).typed<__and___Tensor::schema>();
}

// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __and___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create___and___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __and___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create___and___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__iand___Scalar::schema> create___iand___Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__iand___Scalar::name, __iand___Scalar::overload_name).typed<__iand___Scalar::schema>();
}

// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __iand___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create___iand___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __iand___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create___iand___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__iand___Tensor::schema> create___iand___Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__iand___Tensor::name, __iand___Tensor::overload_name).typed<__iand___Tensor::schema>();
}

// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __iand___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create___iand___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __iand___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create___iand___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tril_::schema> create_tril__typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tril_::name, tril_::overload_name).typed<tril_::schema>();
}

// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
at::Tensor & tril_::call(at::Tensor & self, int64_t diagonal) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_tril__typed_handle();
    return handle.call(self, diagonal);
}

// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
at::Tensor & tril_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_tril__typed_handle();
    return handle.redispatch(dispatchKeySet, self, diagonal);
}

// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<uniform_::schema> create_uniform__typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(uniform_::name, uniform_::overload_name).typed<uniform_::schema>();
}

// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & uniform_::call(at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_uniform__typed_handle();
    return handle.call(self, from, to, generator);
}

// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & uniform_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_uniform__typed_handle();
    return handle.redispatch(dispatchKeySet, self, from, to, generator);
}

// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tril_out::schema> create_tril_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tril_out::name, tril_out::overload_name).typed<tril_out::schema>();
}

// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_out::call(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_tril_out_typed_handle();
    return handle.call(self, diagonal, out);
}

// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_tril_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, diagonal, out);
}

// aten::tril(Tensor self, int diagonal=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tril::schema> create_tril_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tril::name, tril::overload_name).typed<tril::schema>();
}

// aten::tril(Tensor self, int diagonal=0) -> Tensor
at::Tensor tril::call(const at::Tensor & self, int64_t diagonal) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_tril_typed_handle();
    return handle.call(self, diagonal);
}

// aten::tril(Tensor self, int diagonal=0) -> Tensor
at::Tensor tril::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_tril_typed_handle();
    return handle.redispatch(dispatchKeySet, self, diagonal);
}

// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tril_indices::schema> create_tril_indices_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tril_indices::name, tril_indices::overload_name).typed<tril_indices::schema>();
}

// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor tril_indices::call(int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_tril_indices_typed_handle();
    return handle.call(row, col, offset, dtype, layout, device, pin_memory);
}

// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor tril_indices::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_tril_indices_typed_handle();
    return handle.redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory);
}

// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Scalar_out::schema> create_less_equal_Scalar_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_equal_Scalar_out::name, less_equal_Scalar_out::overload_name).typed<less_equal_Scalar_out::schema>();
}

// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_equal_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_equal_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Scalar::schema> create_less_equal_Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_equal_Scalar::name, less_equal_Scalar::overload_name).typed<less_equal_Scalar::schema>();
}

// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor less_equal_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_equal_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor less_equal_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_equal_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Tensor_out::schema> create_less_equal_Tensor_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_equal_Tensor_out::name, less_equal_Tensor_out::overload_name).typed<less_equal_Tensor_out::schema>();
}

// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_equal_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_equal_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Tensor::schema> create_less_equal_Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_equal_Tensor::name, less_equal_Tensor::overload_name).typed<less_equal_Tensor::schema>();
}

// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor less_equal_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_equal_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor less_equal_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_equal_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal__Scalar::schema> create_less_equal__Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_equal__Scalar::name, less_equal__Scalar::overload_name).typed<less_equal__Scalar::schema>();
}

// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & less_equal__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_equal__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & less_equal__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_equal__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal__Tensor::schema> create_less_equal__Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_equal__Tensor::name, less_equal__Tensor::overload_name).typed<less_equal__Tensor::schema>();
}

// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & less_equal__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_equal__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & less_equal__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_equal__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt_Scalar_out::schema> create_gt_Scalar_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gt_Scalar_out::name, gt_Scalar_out::overload_name).typed<gt_Scalar_out::schema>();
}

// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_gt_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_gt_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gt_Scalar::schema> create_gt_Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gt_Scalar::name, gt_Scalar::overload_name).typed<gt_Scalar::schema>();
}

// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor gt_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_gt_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor gt_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_gt_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt_Tensor_out::schema> create_gt_Tensor_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gt_Tensor_out::name, gt_Tensor_out::overload_name).typed<gt_Tensor_out::schema>();
}

// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_gt_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_gt_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gt_Tensor::schema> create_gt_Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gt_Tensor::name, gt_Tensor::overload_name).typed<gt_Tensor::schema>();
}

// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor gt_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_gt_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor gt_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_gt_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt__Scalar::schema> create_gt__Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gt__Scalar::name, gt__Scalar::overload_name).typed<gt__Scalar::schema>();
}

// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & gt__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_gt__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & gt__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_gt__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt__Tensor::schema> create_gt__Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gt__Tensor::name, gt__Tensor::overload_name).typed<gt__Tensor::schema>();
}

// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & gt__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_gt__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & gt__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_gt__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt_Scalar_out::schema> create_lt_Scalar_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lt_Scalar_out::name, lt_Scalar_out::overload_name).typed<lt_Scalar_out::schema>();
}

// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_lt_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_lt_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lt_Scalar::schema> create_lt_Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lt_Scalar::name, lt_Scalar::overload_name).typed<lt_Scalar::schema>();
}

// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor lt_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_lt_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor lt_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_lt_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt_Tensor_out::schema> create_lt_Tensor_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lt_Tensor_out::name, lt_Tensor_out::overload_name).typed<lt_Tensor_out::schema>();
}

// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_lt_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_lt_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lt_Tensor::schema> create_lt_Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lt_Tensor::name, lt_Tensor::overload_name).typed<lt_Tensor::schema>();
}

// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor lt_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_lt_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor lt_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_lt_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt__Scalar::schema> create_lt__Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lt__Scalar::name, lt__Scalar::overload_name).typed<lt__Scalar::schema>();
}

// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & lt__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_lt__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & lt__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_lt__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt__Tensor::schema> create_lt__Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lt__Tensor::name, lt__Tensor::overload_name).typed<lt__Tensor::schema>();
}

// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lt__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_lt__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lt__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_lt__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_Scalar_out::schema> create_less_Scalar_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_Scalar_out::name, less_Scalar_out::overload_name).typed<less_Scalar_out::schema>();
}

// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<less_Scalar::schema> create_less_Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_Scalar::name, less_Scalar::overload_name).typed<less_Scalar::schema>();
}

// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor less_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor less_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_Tensor_out::schema> create_less_Tensor_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the static C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(less_Tensor_out::name, less_Tensor_out::overload_name).typed<less_Tensor_out::schema>();
}

// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle lookup happens once; later calls reuse the cached handle.
    static auto handle = create_less_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto handle = create_less_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<less_Tensor::schema> create_less_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less_Tensor::name, less_Tensor::overload_name)
      .typed<less_Tensor::schema>();
}

// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor less_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_less_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor less_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_less_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<less__Scalar::schema> create_less__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less__Scalar::name, less__Scalar::overload_name)
      .typed<less__Scalar::schema>();
}

// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & less__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_less__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & less__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_less__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<less__Tensor::schema> create_less__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less__Tensor::name, less__Tensor::overload_name)
      .typed<less__Tensor::schema>();
}

// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & less__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_less__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & less__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_less__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<masked_select_out::schema> create_masked_select_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_select_out::name, masked_select_out::overload_name)
      .typed<masked_select_out::schema>();
}

// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_select_out::call(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_masked_select_out_typed_handle();
    return op.call(self, mask, out);
}

// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & masked_select_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_masked_select_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, out);
}

// aten::masked_select(Tensor self, Tensor mask) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<masked_select::schema> create_masked_select_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_select::name, masked_select::overload_name)
      .typed<masked_select::schema>();
}

// aten::masked_select(Tensor self, Tensor mask) -> Tensor
at::Tensor masked_select::call(const at::Tensor & self, const at::Tensor & mask) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_masked_select_typed_handle();
    return op.call(self, mask);
}

// aten::masked_select(Tensor self, Tensor mask) -> Tensor
at::Tensor masked_select::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_masked_select_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask);
}

// aten::nonzero_static.out(Tensor self, *, SymInt size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_static_out::schema> create_nonzero_static_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_static_out::name, nonzero_static_out::overload_name)
      .typed<nonzero_static_out::schema>();
}

// aten::nonzero_static.out(Tensor self, *, SymInt size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nonzero_static_out::call(const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_nonzero_static_out_typed_handle();
    return op.call(self, size, fill_value, out);
}

// aten::nonzero_static.out(Tensor self, *, SymInt size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nonzero_static_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_nonzero_static_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value, out);
}

// aten::nonzero_static(Tensor self, *, SymInt size, int fill_value=-1) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_static::schema> create_nonzero_static_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_static::name, nonzero_static::overload_name)
      .typed<nonzero_static::schema>();
}

// aten::nonzero_static(Tensor self, *, SymInt size, int fill_value=-1) -> Tensor
at::Tensor nonzero_static::call(const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_nonzero_static_typed_handle();
    return op.call(self, size, fill_value);
}

// aten::nonzero_static(Tensor self, *, SymInt size, int fill_value=-1) -> Tensor
at::Tensor nonzero_static::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_nonzero_static_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value);
}

// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<addcdiv_out::schema> create_addcdiv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcdiv_out::name, addcdiv_out::overload_name)
      .typed<addcdiv_out::schema>();
}

// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addcdiv_out::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_addcdiv_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addcdiv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_addcdiv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<addcdiv::schema> create_addcdiv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcdiv::name, addcdiv::overload_name)
      .typed<addcdiv::schema>();
}

// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
at::Tensor addcdiv::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_addcdiv_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
at::Tensor addcdiv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_addcdiv_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<addcdiv_::schema> create_addcdiv__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcdiv_::name, addcdiv_::overload_name)
      .typed<addcdiv_::schema>();
}

// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
at::Tensor & addcdiv_::call(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_addcdiv__typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
at::Tensor & addcdiv_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_addcdiv__typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_cholesky_solve_helper::schema> create__cholesky_solve_helper_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cholesky_solve_helper::name, _cholesky_solve_helper::overload_name)
      .typed<_cholesky_solve_helper::schema>();
}

// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
at::Tensor _cholesky_solve_helper::call(const at::Tensor & self, const at::Tensor & A, bool upper) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create__cholesky_solve_helper_typed_handle();
    return op.call(self, A, upper);
}

// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
at::Tensor _cholesky_solve_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create__cholesky_solve_helper_typed_handle();
    return op.redispatch(dispatchKeySet, self, A, upper);
}

// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_inverse::schema> create_cholesky_inverse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cholesky_inverse::name, cholesky_inverse::overload_name)
      .typed<cholesky_inverse::schema>();
}

// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky_inverse::call(const at::Tensor & self, bool upper) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_cholesky_inverse_typed_handle();
    return op.call(self, upper);
}

// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky_inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_cholesky_inverse_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper);
}

// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_inverse_out::schema> create_cholesky_inverse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cholesky_inverse_out::name, cholesky_inverse_out::overload_name)
      .typed<cholesky_inverse_out::schema>();
}

// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_inverse_out::call(const at::Tensor & self, bool upper, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_cholesky_inverse_out_typed_handle();
    return op.call(self, upper, out);
}

// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_inverse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_cholesky_inverse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, upper, out);
}

// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_lu_with_info::schema> create__lu_with_info_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_lu_with_info::name, _lu_with_info::overload_name)
      .typed<_lu_with_info::schema>();
}

// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info::call(const at::Tensor & self, bool pivot, bool check_errors) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create__lu_with_info_typed_handle();
    return op.call(self, pivot, check_errors);
}

// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool pivot, bool check_errors) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create__lu_with_info_typed_handle();
    return op.redispatch(dispatchKeySet, self, pivot, check_errors);
}

// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<atan2_out::schema> create_atan2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atan2_out::name, atan2_out::overload_name)
      .typed<atan2_out::schema>();
}

// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan2_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_atan2_out_typed_handle();
    return op.call(self, other, out);
}

// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_atan2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<atan2_::schema> create_atan2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atan2_::name, atan2_::overload_name)
      .typed<atan2_::schema>();
}

// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & atan2_::call(at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_atan2__typed_handle();
    return op.call(self, other);
}

// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & atan2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_atan2__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::atan2(Tensor self, Tensor other) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<atan2::schema> create_atan2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atan2::name, atan2::overload_name)
      .typed<atan2::schema>();
}

// aten::atan2(Tensor self, Tensor other) -> Tensor
at::Tensor atan2::call(const at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_atan2_typed_handle();
    return op.call(self, other);
}

// aten::atan2(Tensor self, Tensor other) -> Tensor
at::Tensor atan2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_atan2_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<histogramdd::schema> create_histogramdd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogramdd::name, histogramdd::overload_name)
      .typed<histogramdd::schema>();
}

// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd::call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_histogramdd_typed_handle();
    return op.call(self, bins, range, weight, density);
}

// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_histogramdd_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<histogramdd_int_bins::schema> create_histogramdd_int_bins_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogramdd_int_bins::name, histogramdd_int_bins::overload_name)
      .typed<histogramdd_int_bins::schema>();
}

// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins::call(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_histogramdd_int_bins_typed_handle();
    return op.call(self, bins, range, weight, density);
}

// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_histogramdd_int_bins_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<histogramdd_TensorList_bins::schema> create_histogramdd_TensorList_bins_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(histogramdd_TensorList_bins::name, histogramdd_TensorList_bins::overload_name)
      .typed<histogramdd_TensorList_bins::schema>();
}

// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins::call(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_histogramdd_TensorList_bins_typed_handle();
    return op.call(self, bins, range, weight, density);
}

// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_histogramdd_TensorList_bins_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hypot_out::schema> create_hypot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hypot_out::name, hypot_out::overload_name)
      .typed<hypot_out::schema>();
}

// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hypot_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_hypot_out_typed_handle();
    return op.call(self, other, out);
}

// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hypot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_hypot_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::hypot(Tensor self, Tensor other) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hypot::schema> create_hypot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hypot::name, hypot::overload_name)
      .typed<hypot::schema>();
}

// aten::hypot(Tensor self, Tensor other) -> Tensor
at::Tensor hypot::call(const at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_hypot_typed_handle();
    return op.call(self, other);
}

// aten::hypot(Tensor self, Tensor other) -> Tensor
at::Tensor hypot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_hypot_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hypot_::schema> create_hypot__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hypot_::name, hypot_::overload_name)
      .typed<hypot_::schema>();
}

// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & hypot_::call(at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_hypot__typed_handle();
    return op.call(self, other);
}

// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & hypot_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_hypot__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<igammac_out::schema> create_igammac_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(igammac_out::name, igammac_out::overload_name)
      .typed<igammac_out::schema>();
}

// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igammac_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_igammac_out_typed_handle();
    return op.call(self, other, out);
}

// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igammac_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_igammac_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::igammac(Tensor self, Tensor other) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<igammac::schema> create_igammac_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(igammac::name, igammac::overload_name)
      .typed<igammac::schema>();
}

// aten::igammac(Tensor self, Tensor other) -> Tensor
at::Tensor igammac::call(const at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_igammac_typed_handle();
    return op.call(self, other);
}

// aten::igammac(Tensor self, Tensor other) -> Tensor
at::Tensor igammac::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_igammac_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<igammac_::schema> create_igammac__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(igammac_::name, igammac_::overload_name)
      .typed<igammac_::schema>();
}

// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igammac_::call(at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_igammac__typed_handle();
    return op.call(self, other);
}

// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igammac_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_igammac__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::fmax(Tensor self, Tensor other) -> Tensor
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<fmax::schema> create_fmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmax::name, fmax::overload_name)
      .typed<fmax::schema>();
}

// aten::fmax(Tensor self, Tensor other) -> Tensor
at::Tensor fmax::call(const at::Tensor & self, const at::Tensor & other) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_fmax_typed_handle();
    return op.call(self, other);
}

// aten::fmax(Tensor self, Tensor other) -> Tensor
at::Tensor fmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_fmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<fmax_out::schema> create_fmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmax_out::name, fmax_out::overload_name)
      .typed<fmax_out::schema>();
}

// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmax_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_fmax_out_typed_handle();
    return op.call(self, other, out);
}

// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_fmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sort_values::schema> create_sort_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_values::name, sort_values::overload_name)
      .typed<sort_values::schema>();
}

// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_values::call(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_sort_values_typed_handle();
    return op.call(self, dim, descending, values, indices);
}

// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_sort_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending, values, indices);
}

// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sort_values_stable::schema> create_sort_values_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_values_stable::name, sort_values_stable::overload_name)
      .typed<sort_values_stable::schema>();
}

// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_values_stable::call(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_sort_values_stable_typed_handle();
    return op.call(self, stable, dim, descending, values, indices);
}

// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_values_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_sort_values_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending, values, indices);
}

// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// One-time schema lookup; findSchemaOrThrow fails loudly if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sort::schema> create_sort_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort::name, sort::overload_name)
      .typed<sort::schema>();
}

// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort::call(const at::Tensor & self, int64_t dim, bool descending) {
    // Cached handle: the schema lookup above runs at most once (thread-safe static).
    static auto op = create_sort_typed_handle();
    return op.call(self, dim, descending);
}

// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending) {
    // Same cached handle; dispatch proceeds from the caller-supplied DispatchKeySet.
    static auto op = create_sort_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sort_stable::schema> create_sort_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_stable::name, sort_stable::overload_name)
      .typed<sort_stable::schema>();
}

// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort_stable::call(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_sort_stable_typed_handle();
    return op.call(self, stable, dim, descending);
}

// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_sort_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending);
}

// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname_values::schema> create_sort_dimname_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname_values::name, sort_dimname_values::overload_name)
      .typed<sort_dimname_values::schema>();
}

// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values::call(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_sort_dimname_values_typed_handle();
    return op.call(self, dim, descending, values, indices);
}

// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_sort_dimname_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending, values, indices);
}

// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname_values_stable::schema> create_sort_dimname_values_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname_values_stable::name, sort_dimname_values_stable::overload_name)
      .typed<sort_dimname_values_stable::schema>();
}

// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values_stable::call(const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_sort_dimname_values_stable_typed_handle();
    return op.call(self, stable, dim, descending, values, indices);
}

// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_sort_dimname_values_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending, values, indices);
}

// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname::schema> create_sort_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname::name, sort_dimname::overload_name)
      .typed<sort_dimname::schema>();
}

// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort_dimname::call(const at::Tensor & self, at::Dimname dim, bool descending) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_sort_dimname_typed_handle();
    return op.call(self, dim, descending);
}

// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_sort_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname_stable::schema> create_sort_dimname_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname_stable::name, sort_dimname_stable::overload_name)
      .typed<sort_dimname_stable::schema>();
}

// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable::call(const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_sort_dimname_stable_typed_handle();
    return op.call(self, stable, dim, descending);
}

// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_sort_dimname_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending);
}

// aten::all(Tensor self) -> Tensor
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<all::schema> create_all_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all::name, all::overload_name)
      .typed<all::schema>();
}

// aten::all(Tensor self) -> Tensor
at::Tensor all::call(const at::Tensor & self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_all_typed_handle();
    return op.call(self);
}

// aten::all(Tensor self) -> Tensor
at::Tensor all::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_all_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<all_all_out::schema> create_all_all_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_all_out::name, all_all_out::overload_name)
      .typed<all_all_out::schema>();
}

// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_all_out::call(const at::Tensor & self, at::Tensor & out) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create_all_all_out_typed_handle();
    return op.call(self, out);
}

// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_all_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create_all_all_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_amp_update_scale_::schema> create__amp_update_scale__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_amp_update_scale_::name, _amp_update_scale_::overload_name)
      .typed<_amp_update_scale_::schema>();
}

// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
at::Tensor & _amp_update_scale_::call(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__amp_update_scale__typed_handle();
    return op.call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
at::Tensor & _amp_update_scale_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__amp_update_scale__typed_handle();
    return op.redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Scalar::schema> create__foreach_addcdiv_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv_Scalar::name, _foreach_addcdiv_Scalar::overload_name)
      .typed<_foreach_addcdiv_Scalar::schema>();
}

// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_addcdiv_Scalar_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_addcdiv_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_ScalarList::schema> create__foreach_addcdiv_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv_ScalarList::name, _foreach_addcdiv_ScalarList::overload_name)
      .typed<_foreach_addcdiv_ScalarList::schema>();
}

// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_addcdiv_ScalarList_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_addcdiv_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Tensor::schema> create__foreach_addcdiv_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv_Tensor::name, _foreach_addcdiv_Tensor::overload_name)
      .typed<_foreach_addcdiv_Tensor::schema>();
}

// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_addcdiv_Tensor_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_addcdiv_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv__Scalar::schema> create__foreach_addcdiv__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv__Scalar::name, _foreach_addcdiv__Scalar::overload_name)
      .typed<_foreach_addcdiv__Scalar::schema>();
}

// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
void _foreach_addcdiv__Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_addcdiv__Scalar_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
void _foreach_addcdiv__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_addcdiv__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv__ScalarList::schema> create__foreach_addcdiv__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv__ScalarList::name, _foreach_addcdiv__ScalarList::overload_name)
      .typed<_foreach_addcdiv__ScalarList::schema>();
}

// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
void _foreach_addcdiv__ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_addcdiv__ScalarList_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
void _foreach_addcdiv__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_addcdiv__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv__Tensor::schema> create__foreach_addcdiv__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv__Tensor::name, _foreach_addcdiv__Tensor::overload_name)
      .typed<_foreach_addcdiv__Tensor::schema>();
}

// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
void _foreach_addcdiv__Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_addcdiv__Tensor_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
void _foreach_addcdiv__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_addcdiv__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_exp(Tensor[] self) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_exp::schema> create__foreach_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_exp::name, _foreach_exp::overload_name)
      .typed<_foreach_exp::schema>();
}

// aten::_foreach_exp(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_exp::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_exp_typed_handle();
    return op.call(self);
}

// aten::_foreach_exp(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_exp::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_exp_::schema> create__foreach_exp__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_exp_::name, _foreach_exp_::overload_name)
      .typed<_foreach_exp_::schema>();
}

// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
void _foreach_exp_::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_exp__typed_handle();
    return op.call(self);
}

// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
void _foreach_exp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_exp__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log(Tensor[] self) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log::schema> create__foreach_log_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log::name, _foreach_log::overload_name)
      .typed<_foreach_log::schema>();
}

// aten::_foreach_log(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_log_typed_handle();
    return op.call(self);
}

// aten::_foreach_log(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_log_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log_(Tensor(a!)[] self) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log_::schema> create__foreach_log__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log_::name, _foreach_log_::overload_name)
      .typed<_foreach_log_::schema>();
}

// aten::_foreach_log_(Tensor(a!)[] self) -> ()
void _foreach_log_::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_log__typed_handle();
    return op.call(self);
}

// aten::_foreach_log_(Tensor(a!)[] self) -> ()
void _foreach_log_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_log__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log1p::schema> create__foreach_log1p_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log1p::name, _foreach_log1p::overload_name)
      .typed<_foreach_log1p::schema>();
}

// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log1p::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_log1p_typed_handle();
    return op.call(self);
}

// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log1p::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_log1p_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log1p_::schema> create__foreach_log1p__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log1p_::name, _foreach_log1p_::overload_name)
      .typed<_foreach_log1p_::schema>();
}

// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
void _foreach_log1p_::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_log1p__typed_handle();
    return op.call(self);
}

// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
void _foreach_log1p_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_log1p__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_neg(Tensor[] self) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_neg::schema> create__foreach_neg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_neg::name, _foreach_neg::overload_name)
      .typed<_foreach_neg::schema>();
}

// aten::_foreach_neg(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_neg::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_neg_typed_handle();
    return op.call(self);
}

// aten::_foreach_neg(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_neg::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_neg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_neg_::schema> create__foreach_neg__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_neg_::name, _foreach_neg_::overload_name)
      .typed<_foreach_neg_::schema>();
}

// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
void _foreach_neg_::call(at::TensorList self) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_neg__typed_handle();
    return op.call(self);
}

// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
void _foreach_neg_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_neg__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_norm_Scalar::schema> create__foreach_norm_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_norm_Scalar::name, _foreach_norm_Scalar::overload_name)
      .typed<_foreach_norm_Scalar::schema>();
}

// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
::std::vector<at::Tensor> _foreach_norm_Scalar::call(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_norm_Scalar_typed_handle();
    return op.call(self, ord, dtype);
}

// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
::std::vector<at::Tensor> _foreach_norm_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_norm_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dtype);
}

// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_List::schema> create__foreach_pow_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_List::name, _foreach_pow_List::overload_name)
      .typed<_foreach_pow_List::schema>();
}

// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_List::call(at::TensorList self, at::TensorList exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow_List_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_Scalar::schema> create__foreach_pow_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_Scalar::name, _foreach_pow_Scalar::overload_name)
      .typed<_foreach_pow_Scalar::schema>();
}

// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_Scalar::call(at::TensorList self, const at::Scalar & exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_ScalarList::schema> create__foreach_pow_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_ScalarList::name, _foreach_pow_ScalarList::overload_name)
      .typed<_foreach_pow_ScalarList::schema>();
}

// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow_ScalarList_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_ScalarAndTensor::schema> create__foreach_pow_ScalarAndTensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_ScalarAndTensor::name, _foreach_pow_ScalarAndTensor::overload_name)
      .typed<_foreach_pow_ScalarAndTensor::schema>();
}

// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor::call(const at::Scalar & self, at::TensorList exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow_ScalarAndTensor_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, at::TensorList exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow_ScalarAndTensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow__List::schema> create__foreach_pow__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow__List::name, _foreach_pow__List::overload_name)
      .typed<_foreach_pow__List::schema>();
}

// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
void _foreach_pow__List::call(at::TensorList self, at::TensorList exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow__List_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
void _foreach_pow__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow__Scalar::schema> create__foreach_pow__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow__Scalar::name, _foreach_pow__Scalar::overload_name)
      .typed<_foreach_pow__Scalar::schema>();
}

// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
void _foreach_pow__Scalar::call(at::TensorList self, const at::Scalar & exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow__Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
void _foreach_pow__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
// One-time dispatcher lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow__ScalarList::schema> create__foreach_pow__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow__ScalarList::name, _foreach_pow__ScalarList::overload_name)
      .typed<_foreach_pow__ScalarList::schema>();
}

// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
void _foreach_pow__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto op = create__foreach_pow__ScalarList_typed_handle();
    return op.call(self, exponent);
}

// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
void _foreach_pow__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    
    // Dispatch continues with the explicit, caller-supplied key set.
    static auto op = create__foreach_pow__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_reciprocal::schema> create__foreach_reciprocal_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_reciprocal::name, _foreach_reciprocal::overload_name).typed<_foreach_reciprocal::schema>();
}

// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_reciprocal::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_reciprocal_typed_handle();
    return handle.call(self);
}

// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_reciprocal::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_reciprocal_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_reciprocal_::schema> create__foreach_reciprocal__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_reciprocal_::name, _foreach_reciprocal_::overload_name).typed<_foreach_reciprocal_::schema>();
}

// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
void _foreach_reciprocal_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_reciprocal__typed_handle();
    return handle.call(self);
}

// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
void _foreach_reciprocal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_reciprocal__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_rsqrt(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_rsqrt::schema> create__foreach_rsqrt_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_rsqrt::name, _foreach_rsqrt::overload_name).typed<_foreach_rsqrt::schema>();
}

// aten::_foreach_rsqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_rsqrt::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_rsqrt_typed_handle();
    return handle.call(self);
}

// aten::_foreach_rsqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_rsqrt::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_rsqrt_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_rsqrt_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_rsqrt_::schema> create__foreach_rsqrt__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_rsqrt_::name, _foreach_rsqrt_::overload_name).typed<_foreach_rsqrt_::schema>();
}

// aten::_foreach_rsqrt_(Tensor(a!)[] self) -> ()
void _foreach_rsqrt_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_rsqrt__typed_handle();
    return handle.call(self);
}

// aten::_foreach_rsqrt_(Tensor(a!)[] self) -> ()
void _foreach_rsqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_rsqrt__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sigmoid::schema> create__foreach_sigmoid_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sigmoid::name, _foreach_sigmoid::overload_name).typed<_foreach_sigmoid::schema>();
}

// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sigmoid::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_sigmoid_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_sigmoid_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sigmoid_::schema> create__foreach_sigmoid__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sigmoid_::name, _foreach_sigmoid_::overload_name).typed<_foreach_sigmoid_::schema>();
}

// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
void _foreach_sigmoid_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_sigmoid__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
void _foreach_sigmoid_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_sigmoid__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sin(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sin::schema> create__foreach_sin_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sin::name, _foreach_sin::overload_name).typed<_foreach_sin::schema>();
}

// aten::_foreach_sin(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sin::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_sin_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sin(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sin::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_sin_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sin_::schema> create__foreach_sin__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sin_::name, _foreach_sin_::overload_name).typed<_foreach_sin_::schema>();
}

// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
void _foreach_sin_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_sin__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
void _foreach_sin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_sin__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sqrt::schema> create__foreach_sqrt_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sqrt::name, _foreach_sqrt::overload_name).typed<_foreach_sqrt::schema>();
}

// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sqrt::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_sqrt_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sqrt::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_sqrt_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sqrt_::schema> create__foreach_sqrt__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sqrt_::name, _foreach_sqrt_::overload_name).typed<_foreach_sqrt_::schema>();
}

// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
void _foreach_sqrt_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__foreach_sqrt__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
void _foreach_sqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__foreach_sqrt__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_coo_to_csr::schema> create__convert_indices_from_coo_to_csr_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_indices_from_coo_to_csr::name, _convert_indices_from_coo_to_csr::overload_name).typed<_convert_indices_from_coo_to_csr::schema>();
}

// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
at::Tensor _convert_indices_from_coo_to_csr::call(const at::Tensor & self, int64_t size, bool out_int32) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__convert_indices_from_coo_to_csr_typed_handle();
    return handle.call(self, size, out_int32);
}

// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
at::Tensor _convert_indices_from_coo_to_csr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__convert_indices_from_coo_to_csr_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, out_int32);
}

// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_coo_to_csr_out::schema> create__convert_indices_from_coo_to_csr_out_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_indices_from_coo_to_csr_out::name, _convert_indices_from_coo_to_csr_out::overload_name).typed<_convert_indices_from_coo_to_csr_out::schema>();
}

// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_coo_to_csr_out::call(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create__convert_indices_from_coo_to_csr_out_typed_handle();
    return handle.call(self, size, out_int32, out);
}

// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_coo_to_csr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create__convert_indices_from_coo_to_csr_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, out_int32, out);
}

// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss_out::schema> create_multi_margin_loss_out_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multi_margin_loss_out::name, multi_margin_loss_out::overload_name).typed<multi_margin_loss_out::schema>();
}

// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multi_margin_loss_out::call(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_multi_margin_loss_out_typed_handle();
    return handle.call(self, target, p, margin, weight, reduction, out);
}

// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multi_margin_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_multi_margin_loss_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, p, margin, weight, reduction, out);
}

// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss::schema> create_multi_margin_loss_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multi_margin_loss::name, multi_margin_loss::overload_name).typed<multi_margin_loss::schema>();
}

// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor multi_margin_loss::call(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_multi_margin_loss_typed_handle();
    return handle.call(self, target, p, margin, weight, reduction);
}

// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor multi_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_multi_margin_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, p, margin, weight, reduction);
}

// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_out::schema> create_multilabel_margin_loss_out_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multilabel_margin_loss_out::name, multilabel_margin_loss_out::overload_name).typed<multilabel_margin_loss_out::schema>();
}

// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multilabel_margin_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_multilabel_margin_loss_out_typed_handle();
    return handle.call(self, target, reduction, out);
}

// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multilabel_margin_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_multilabel_margin_loss_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction, out);
}

// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss::schema> create_multilabel_margin_loss_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multilabel_margin_loss::name, multilabel_margin_loss::overload_name).typed<multilabel_margin_loss::schema>();
}

// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor multilabel_margin_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_multilabel_margin_loss_typed_handle();
    return handle.call(self, target, reduction);
}

// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor multilabel_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_multilabel_margin_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_forward_output::schema> create_multilabel_margin_loss_forward_output_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multilabel_margin_loss_forward_output::name, multilabel_margin_loss_forward_output::overload_name).typed<multilabel_margin_loss_forward_output::schema>();
}

// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_output::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_multilabel_margin_loss_forward_output_typed_handle();
    return handle.call(self, target, reduction, output, is_target);
}

// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_multilabel_margin_loss_forward_output_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction, output, is_target);
}

// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_forward::schema> create_multilabel_margin_loss_forward_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multilabel_margin_loss_forward::name, multilabel_margin_loss_forward::overload_name).typed<multilabel_margin_loss_forward::schema>();
}

// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_multilabel_margin_loss_forward_typed_handle();
    return handle.call(self, target, reduction);
}

// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_multilabel_margin_loss_forward_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_forward_output::schema> create_nll_loss_forward_output_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nll_loss_forward_output::name, nll_loss_forward_output::overload_name).typed<nll_loss_forward_output::schema>();
}

// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_output::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_nll_loss_forward_output_typed_handle();
    return handle.call(self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_nll_loss_forward_output_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_forward::schema> create_nll_loss_forward_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nll_loss_forward::name, nll_loss_forward::overload_name).typed<nll_loss_forward::schema>();
}

// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_nll_loss_forward_typed_handle();
    return handle.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_nll_loss_forward_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss_backward_grad_input::schema> create_soft_margin_loss_backward_grad_input_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(soft_margin_loss_backward_grad_input::name, soft_margin_loss_backward_grad_input::overload_name).typed<soft_margin_loss_backward_grad_input::schema>();
}

// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & soft_margin_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_soft_margin_loss_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, target, reduction, grad_input);
}

// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & soft_margin_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_soft_margin_loss_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
}

// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss_backward::schema> create_soft_margin_loss_backward_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(soft_margin_loss_backward::name, soft_margin_loss_backward::overload_name).typed<soft_margin_loss_backward::schema>();
}

// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor soft_margin_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_soft_margin_loss_backward_typed_handle();
    return handle.call(grad_output, self, target, reduction);
}

// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor soft_margin_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_soft_margin_loss_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, reduction);
}

// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<glu_jvp::schema> create_glu_jvp_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(glu_jvp::name, glu_jvp::overload_name).typed<glu_jvp::schema>();
}

// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
at::Tensor glu_jvp::call(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_glu_jvp_typed_handle();
    return handle.call(glu, x, dx, dim);
}

// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
at::Tensor glu_jvp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_glu_jvp_typed_handle();
    return handle.redispatch(dispatchKeySet, glu, x, dx, dim);
}

// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_out::schema> create_hardswish_out_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hardswish_out::name, hardswish_out::overload_name).typed<hardswish_out::schema>();
}

// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardswish_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_hardswish_out_typed_handle();
    return handle.call(self, out);
}

// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardswish_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_hardswish_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::hardswish(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hardswish::schema> create_hardswish_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hardswish::name, hardswish::overload_name).typed<hardswish::schema>();
}

// aten::hardswish(Tensor self) -> Tensor
at::Tensor hardswish::call(const at::Tensor & self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_hardswish_typed_handle();
    return handle.call(self);
}

// aten::hardswish(Tensor self) -> Tensor
at::Tensor hardswish::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_hardswish_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_::schema> create_hardswish__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hardswish_::name, hardswish_::overload_name).typed<hardswish_::schema>();
}

// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & hardswish_::call(at::Tensor & self) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_hardswish__typed_handle();
    return handle.call(self);
}

// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & hardswish_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_hardswish__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_out::schema> create_rrelu_with_noise_out_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rrelu_with_noise_out::name, rrelu_with_noise_out::overload_name).typed<rrelu_with_noise_out::schema>();
}

// aten::rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rrelu_with_noise_out::call(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_rrelu_with_noise_out_typed_handle();
    return handle.call(self, noise, lower, upper, training, generator, out);
}

// aten::rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rrelu_with_noise_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_rrelu_with_noise_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator, out);
}

// aten::rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise::schema> create_rrelu_with_noise_typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rrelu_with_noise::name, rrelu_with_noise::overload_name).typed<rrelu_with_noise::schema>();
}

// aten::rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
at::Tensor rrelu_with_noise::call(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_rrelu_with_noise_typed_handle();
    return handle.call(self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
at::Tensor rrelu_with_noise::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_rrelu_with_noise_typed_handle();
    return handle.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_::schema> create_rrelu_with_noise__typed_handle() {
  // Resolve the schema once via the dispatcher and bind it to this op's C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rrelu_with_noise_::name, rrelu_with_noise_::overload_name).typed<rrelu_with_noise_::schema>();
}

// aten::rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
at::Tensor & rrelu_with_noise_::call(at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first use and cached for the process lifetime.
    static const auto handle = create_rrelu_with_noise__typed_handle();
    return handle.call(self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
at::Tensor & rrelu_with_noise_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    // Dispatch again using the caller-supplied key set.
    static const auto handle = create_rrelu_with_noise__typed_handle();
    return handle.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator);
}

// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<softshrink_backward_grad_input::schema> create_softshrink_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink_backward_grad_input::name, softshrink_backward_grad_input::overload_name)
      .typed<softshrink_backward_grad_input::schema>();
}

// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & softshrink_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_softshrink_backward_grad_input_typed_handle();
    return op.call(grad_output, self, lambd, grad_input);
}

// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & softshrink_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_softshrink_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, lambd, grad_input);
}

// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<softshrink_backward::schema> create_softshrink_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink_backward::name, softshrink_backward::overload_name)
      .typed<softshrink_backward::schema>();
}

// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
at::Tensor softshrink_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_softshrink_backward_typed_handle();
    return op.call(grad_output, self, lambd);
}

// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
at::Tensor softshrink_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_softshrink_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, lambd);
}

// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d_backward::schema> create__adaptive_avg_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool2d_backward::name, _adaptive_avg_pool2d_backward::overload_name)
      .typed<_adaptive_avg_pool2d_backward::schema>();
}

// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create__adaptive_avg_pool2d_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create__adaptive_avg_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d_out::schema> create_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d_out::name, avg_pool2d_out::overload_name)
      .typed<avg_pool2d_out::schema>();
}

// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & avg_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_avg_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d::schema> create_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d::name, avg_pool2d::overload_name)
      .typed<avg_pool2d::schema>();
}

// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
at::Tensor avg_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_avg_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
at::Tensor avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d_backward_grad_input::schema> create_fractional_max_pool2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d_backward_grad_input::name, fractional_max_pool2d_backward_grad_input::overload_name)
      .typed<fractional_max_pool2d_backward_grad_input::schema>();
}

// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & fractional_max_pool2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_fractional_max_pool2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & fractional_max_pool2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_fractional_max_pool2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d_backward::schema> create_fractional_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d_backward::name, fractional_max_pool2d_backward::overload_name)
      .typed<fractional_max_pool2d_backward::schema>();
}

// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
at::Tensor fractional_max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_fractional_max_pool2d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, output_size, indices);
}

// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
at::Tensor fractional_max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_fractional_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices);
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices_backward_grad_input::schema> create_max_pool3d_with_indices_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices_backward_grad_input::name, max_pool3d_with_indices_backward_grad_input::overload_name)
      .typed<max_pool3d_with_indices_backward_grad_input::schema>();
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & max_pool3d_with_indices_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_max_pool3d_with_indices_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & max_pool3d_with_indices_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_max_pool3d_with_indices_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices_backward::schema> create_max_pool3d_with_indices_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices_backward::name, max_pool3d_with_indices_backward::overload_name)
      .typed<max_pool3d_with_indices_backward::schema>();
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
at::Tensor max_pool3d_with_indices_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_max_pool3d_with_indices_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
at::Tensor max_pool3d_with_indices_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_max_pool3d_with_indices_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d_backward_grad_input::schema> create_reflection_pad1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d_backward_grad_input::name, reflection_pad1d_backward_grad_input::overload_name)
      .typed<reflection_pad1d_backward_grad_input::schema>();
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & reflection_pad1d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_reflection_pad1d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & reflection_pad1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_reflection_pad1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d_backward::schema> create_reflection_pad1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d_backward::name, reflection_pad1d_backward::overload_name)
      .typed<reflection_pad1d_backward::schema>();
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
at::Tensor reflection_pad1d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_reflection_pad1d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
at::Tensor reflection_pad1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_reflection_pad1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d_backward_grad_input::schema> create_reflection_pad2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d_backward_grad_input::name, reflection_pad2d_backward_grad_input::overload_name)
      .typed<reflection_pad2d_backward_grad_input::schema>();
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & reflection_pad2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_reflection_pad2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & reflection_pad2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_reflection_pad2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d_backward::schema> create_reflection_pad2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d_backward::name, reflection_pad2d_backward::overload_name)
      .typed<reflection_pad2d_backward::schema>();
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
at::Tensor reflection_pad2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_reflection_pad2d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
at::Tensor reflection_pad2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_reflection_pad2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d_backward_grad_input::schema> create_replication_pad1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d_backward_grad_input::name, replication_pad1d_backward_grad_input::overload_name)
      .typed<replication_pad1d_backward_grad_input::schema>();
}

// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad1d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_replication_pad1d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_replication_pad1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d_backward::schema> create_replication_pad1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d_backward::name, replication_pad1d_backward::overload_name)
      .typed<replication_pad1d_backward::schema>();
}

// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
at::Tensor replication_pad1d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_replication_pad1d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
at::Tensor replication_pad1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_replication_pad1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d_backward_grad_input::schema> create_replication_pad3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad3d_backward_grad_input::name, replication_pad3d_backward_grad_input::overload_name)
      .typed<replication_pad3d_backward_grad_input::schema>();
}

// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_replication_pad3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_replication_pad3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d_backward::schema> create_replication_pad3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad3d_backward::name, replication_pad3d_backward::overload_name)
      .typed<replication_pad3d_backward::schema>();
}

// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_replication_pad3d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_replication_pad3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_vec::schema> create__upsample_nearest_exact2d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_vec::name, _upsample_nearest_exact2d_vec::overload_name)
      .typed<_upsample_nearest_exact2d_vec::schema>();
}

// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create__upsample_nearest_exact2d_vec_typed_handle();
    return op.call(input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create__upsample_nearest_exact2d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_vec::schema> create__upsample_nearest_exact3d_vec_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d_vec::name, _upsample_nearest_exact3d_vec::overload_name)
      .typed<_upsample_nearest_exact3d_vec::schema>();
}

// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact3d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create__upsample_nearest_exact3d_vec_typed_handle();
    return op.call(input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact3d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create__upsample_nearest_exact3d_vec_typed_handle();
    return op.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_backward_grad_input::schema> create_upsample_bilinear2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_bilinear2d_backward_grad_input::name, upsample_bilinear2d_backward_grad_input::overload_name)
      .typed<upsample_bilinear2d_backward_grad_input::schema>();
}

// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Cached handle: the dispatcher lookup happens only on first call.
    static auto op = create_upsample_bilinear2d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Same as call(), but dispatches with the caller-supplied DispatchKeySet.
    static auto op = create_upsample_bilinear2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_backward::schema> create_upsample_bilinear2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_bilinear2d_backward::name, upsample_bilinear2d_backward::overload_name)
      .typed<upsample_bilinear2d_backward::schema>();
}

// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bilinear2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_upsample_bilinear2d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bilinear2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_upsample_bilinear2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_backward_grad_input::schema> create__upsample_bicubic2d_aa_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bicubic2d_aa_backward_grad_input::name, _upsample_bicubic2d_aa_backward_grad_input::overload_name)
      .typed<_upsample_bicubic2d_aa_backward_grad_input::schema>();
}

// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_bicubic2d_aa_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_bicubic2d_aa_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_backward::schema> create__upsample_bicubic2d_aa_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_bicubic2d_aa_backward::name, _upsample_bicubic2d_aa_backward::overload_name)
      .typed<_upsample_bicubic2d_aa_backward::schema>();
}

// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_bicubic2d_aa_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_bicubic2d_aa_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_backward_grad_input::schema> create__upsample_nearest_exact1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_backward_grad_input::name, _upsample_nearest_exact1d_backward_grad_input::overload_name)
      .typed<_upsample_nearest_exact1d_backward_grad_input::schema>();
}

// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact1d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact1d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales, grad_input);
}

// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
}

// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_backward::schema> create__upsample_nearest_exact1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_backward::name, _upsample_nearest_exact1d_backward::overload_name)
      .typed<_upsample_nearest_exact1d_backward::schema>();
}

// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
at::Tensor _upsample_nearest_exact1d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact1d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales);
}

// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
at::Tensor _upsample_nearest_exact1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales);
}

// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_out::schema> create__upsample_nearest_exact2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_out::name, _upsample_nearest_exact2d_out::overload_name)
      .typed<_upsample_nearest_exact2d_out::schema>();
}

// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact2d_out_typed_handle();
    return op.call(self, output_size, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d::schema> create__upsample_nearest_exact2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d::name, _upsample_nearest_exact2d::overload_name)
      .typed<_upsample_nearest_exact2d::schema>();
}

// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact2d_typed_handle();
    return op.call(self, output_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_backward_grad_input::schema> create__upsample_nearest_exact2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_backward_grad_input::name, _upsample_nearest_exact2d_backward_grad_input::overload_name)
      .typed<_upsample_nearest_exact2d_backward_grad_input::schema>();
}

// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact2d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_backward::schema> create__upsample_nearest_exact2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_backward::name, _upsample_nearest_exact2d_backward::overload_name)
      .typed<_upsample_nearest_exact2d_backward::schema>();
}

// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact2d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_out::schema> create__upsample_nearest_exact3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d_out::name, _upsample_nearest_exact3d_out::overload_name)
      .typed<_upsample_nearest_exact3d_out::schema>();
}

// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact3d_out_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d::schema> create__upsample_nearest_exact3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d::name, _upsample_nearest_exact3d::overload_name)
      .typed<_upsample_nearest_exact3d::schema>();
}

// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create__upsample_nearest_exact3d_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w);
}

// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create__upsample_nearest_exact3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w);
}

// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated3d::schema> create_slow_conv_dilated3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_dilated3d::name, slow_conv_dilated3d::overload_name)
      .typed<slow_conv_dilated3d::schema>();
}

// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
at::Tensor slow_conv_dilated3d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_slow_conv_dilated3d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
at::Tensor slow_conv_dilated3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_slow_conv_dilated3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::isinf(Tensor self) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<isinf::schema> create_isinf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isinf::name, isinf::overload_name)
      .typed<isinf::schema>();
}

// aten::isinf(Tensor self) -> Tensor
at::Tensor isinf::call(const at::Tensor & self) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_isinf_typed_handle();
    return op.call(self);
}

// aten::isinf(Tensor self) -> Tensor
at::Tensor isinf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_isinf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_digamma(Tensor self) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_digamma::schema> create_special_digamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_digamma::name, special_digamma::overload_name)
      .typed<special_digamma::schema>();
}

// aten::special_digamma(Tensor self) -> Tensor
at::Tensor special_digamma::call(const at::Tensor & self) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_digamma_typed_handle();
    return op.call(self);
}

// aten::special_digamma(Tensor self) -> Tensor
at::Tensor special_digamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_digamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_digamma_out::schema> create_special_digamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_digamma_out::name, special_digamma_out::overload_name)
      .typed<special_digamma_out::schema>();
}

// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_digamma_out::call(const at::Tensor & self, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_digamma_out_typed_handle();
    return op.call(self, out);
}

// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_digamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_digamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_ndtr(Tensor self) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtr::schema> create_special_ndtr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtr::name, special_ndtr::overload_name)
      .typed<special_ndtr::schema>();
}

// aten::special_ndtr(Tensor self) -> Tensor
at::Tensor special_ndtr::call(const at::Tensor & self) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_ndtr_typed_handle();
    return op.call(self);
}

// aten::special_ndtr(Tensor self) -> Tensor
at::Tensor special_ndtr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_ndtr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtr_out::schema> create_special_ndtr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtr_out::name, special_ndtr_out::overload_name)
      .typed<special_ndtr_out::schema>();
}

// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtr_out::call(const at::Tensor & self, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_ndtr_out_typed_handle();
    return op.call(self, out);
}

// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_ndtr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_zeta(Tensor self, Tensor other) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta::schema> create_special_zeta_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_zeta::name, special_zeta::overload_name)
      .typed<special_zeta::schema>();
}

// aten::special_zeta(Tensor self, Tensor other) -> Tensor
at::Tensor special_zeta::call(const at::Tensor & self, const at::Tensor & other) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_zeta_typed_handle();
    return op.call(self, other);
}

// aten::special_zeta(Tensor self, Tensor other) -> Tensor
at::Tensor special_zeta::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_zeta_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_self_scalar::schema> create_special_zeta_self_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_zeta_self_scalar::name, special_zeta_self_scalar::overload_name)
      .typed<special_zeta_self_scalar::schema>();
}

// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_zeta_self_scalar::call(const at::Scalar & self, const at::Tensor & other) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_zeta_self_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_zeta_self_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_zeta_self_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_other_scalar::schema> create_special_zeta_other_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_zeta_other_scalar::name, special_zeta_other_scalar::overload_name)
      .typed<special_zeta_other_scalar::schema>();
}

// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_zeta_other_scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_zeta_other_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_zeta_other_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_zeta_other_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_out::schema> create_special_zeta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_zeta_out::name, special_zeta_out::overload_name)
      .typed<special_zeta_out::schema>();
}

// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_zeta_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_zeta_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_self_scalar_out::schema> create_special_zeta_self_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_zeta_self_scalar_out::name, special_zeta_self_scalar_out::overload_name)
      .typed<special_zeta_self_scalar_out::schema>();
}

// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_self_scalar_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_zeta_self_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_self_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_zeta_self_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_other_scalar_out::schema> create_special_zeta_other_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_zeta_other_scalar_out::name, special_zeta_other_scalar_out::overload_name)
      .typed<special_zeta_other_scalar_out::schema>();
}

// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_other_scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_zeta_other_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_other_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_zeta_other_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_round::schema> create_special_round_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_round::name, special_round::overload_name)
      .typed<special_round::schema>();
}

// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
at::Tensor special_round::call(const at::Tensor & self, int64_t decimals) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_round_typed_handle();
    return op.call(self, decimals);
}

// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
at::Tensor special_round::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals) {
    // Re-enter dispatch with the caller-provided DispatchKeySet, reusing the cached handle.
    static auto op = create_special_round_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals);
}

// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
// Look up the operator by (name, overload_name) in the dispatcher singleton; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<special_round_out::schema> create_special_round_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_round_out::name, special_round_out::overload_name)
      .typed<special_round_out::schema>();
}

// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_round_out::call(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    // Resolve the typed operator handle once (thread-safe function-local static), then dispatch.
    static auto op = create_special_round_out_typed_handle();
    return op.call(self, decimals, out);
}

// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_round_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    
    static auto op = create_special_round_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals, out);
}

// @generated dispatcher stubs for aten::fft_ifft (functional and out= overloads).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft::schema> create_fft_ifft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft::name, fft_ifft::overload_name)
      .typed<fft_ifft::schema>();
}

// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ifft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ifft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ifft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ifft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft_out::schema> create_fft_ifft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft_out::name, fft_ifft_out::overload_name)
      .typed<fft_ifft_out::schema>();
}

// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ifft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ifft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// @generated dispatcher stubs for aten::fft_hfft (functional and out= overloads).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft::schema> create_fft_hfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_hfft::name, fft_hfft::overload_name)
      .typed<fft_hfft::schema>();
}

// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_hfft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_hfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_hfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_hfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft_out::schema> create_fft_hfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_hfft_out::name, fft_hfft_out::overload_name)
      .typed<fft_hfft_out::schema>();
}

// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_hfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_hfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// @generated dispatcher stubs for aten::fft_ihfft (functional and out= overloads).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft::schema> create_fft_ihfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ihfft::name, fft_ihfft::overload_name)
      .typed<fft_ihfft::schema>();
}

// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ihfft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ihfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ihfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ihfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft_out::schema> create_fft_ihfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ihfft_out::name, fft_ihfft_out::overload_name)
      .typed<fft_ihfft_out::schema>();
}

// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ihfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ihfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// @generated dispatcher stubs for aten::fft_ihfft2 (functional and out= overloads).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft2::schema> create_fft_ihfft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ihfft2::name, fft_ihfft2::overload_name)
      .typed<fft_ihfft2::schema>();
}

// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ihfft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ihfft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ihfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ihfft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft2_out::schema> create_fft_ihfft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ihfft2_out::name, fft_ihfft2_out::overload_name)
      .typed<fft_ihfft2_out::schema>();
}

// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ihfft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ihfft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// @generated dispatcher stubs for aten::fft_irfftn (functional and out= overloads).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfftn::schema> create_fft_irfftn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfftn::name, fft_irfftn::overload_name)
      .typed<fft_irfftn::schema>();
}

// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_irfftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_irfftn_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_irfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_irfftn_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfftn_out::schema> create_fft_irfftn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfftn_out::name, fft_irfftn_out::overload_name)
      .typed<fft_irfftn_out::schema>();
}

// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_irfftn_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_irfftn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// @generated dispatcher stubs for aten::fft_ifftshift (functional overload only).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifftshift::schema> create_fft_ifftshift_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifftshift::name, fft_ifftshift::overload_name)
      .typed<fft_ifftshift::schema>();
}

// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_ifftshift::call(const at::Tensor & self, at::OptionalIntArrayRef dim) {
    
    static auto op = create_fft_ifftshift_typed_handle();
    return op.call(self, dim);
}

// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_ifftshift::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim) {
    
    static auto op = create_fft_ifftshift_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// @generated dispatcher stubs for aten::slogdet (functional and out= overloads;
// both return a (sign, logabsdet) pair). ::call starts a fresh dispatch;
// ::redispatch continues with an explicit DispatchKeySet; the typed handle is
// resolved once (C10_NOINLINE creator) and cached in a function-local static.
// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
static C10_NOINLINE c10::TypedOperatorHandle<slogdet::schema> create_slogdet_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slogdet::name, slogdet::overload_name)
      .typed<slogdet::schema>();
}

// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> slogdet::call(const at::Tensor & self) {
    
    static auto op = create_slogdet_typed_handle();
    return op.call(self);
}

// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> slogdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_slogdet_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
static C10_NOINLINE c10::TypedOperatorHandle<slogdet_out::schema> create_slogdet_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slogdet_out::name, slogdet_out::overload_name)
      .typed<slogdet_out::schema>();
}

// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> slogdet_out::call(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
    
    static auto op = create_slogdet_out_typed_handle();
    return op.call(self, sign, logabsdet);
}

// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> slogdet_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
    
    static auto op = create_slogdet_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, sign, logabsdet);
}

// @generated dispatcher stubs for aten::linalg_eig (functional and out= overloads;
// both return an (eigenvalues, eigenvectors) pair). ::call starts a fresh dispatch;
// ::redispatch continues with an explicit DispatchKeySet; the typed handle is
// resolved once (C10_NOINLINE creator) and cached in a function-local static.
// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eig::schema> create_linalg_eig_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_eig::name, linalg_eig::overload_name)
      .typed<linalg_eig::schema>();
}

// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eig::call(const at::Tensor & self) {
    
    static auto op = create_linalg_eig_typed_handle();
    return op.call(self);
}

// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eig::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_linalg_eig_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eig_out::schema> create_linalg_eig_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_eig_out::name, linalg_eig_out::overload_name)
      .typed<linalg_eig_out::schema>();
}

// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out::call(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
    
    static auto op = create_linalg_eig_out_typed_handle();
    return op.call(self, eigenvalues, eigenvectors);
}

// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
    
    static auto op = create_linalg_eig_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, eigenvalues, eigenvectors);
}

// @generated dispatcher stubs for aten::_linalg_eigvals (functional overload only).
// ::call starts a fresh dispatch; ::redispatch continues with an explicit
// DispatchKeySet; the typed handle is resolved once (C10_NOINLINE creator) and
// cached in a function-local static.
// aten::_linalg_eigvals(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_eigvals::schema> create__linalg_eigvals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_eigvals::name, _linalg_eigvals::overload_name)
      .typed<_linalg_eigvals::schema>();
}

// aten::_linalg_eigvals(Tensor self) -> Tensor
at::Tensor _linalg_eigvals::call(const at::Tensor & self) {
    
    static auto op = create__linalg_eigvals_typed_handle();
    return op.call(self);
}

// aten::_linalg_eigvals(Tensor self) -> Tensor
at::Tensor _linalg_eigvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__linalg_eigvals_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// @generated dispatcher stubs for aten::linalg_eigh (functional overload and the
// .eigvals out= overload). ::call starts a fresh dispatch; ::redispatch continues
// with an explicit DispatchKeySet; the typed handle is resolved once
// (C10_NOINLINE creator) and cached in a function-local static.
// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigh::schema> create_linalg_eigh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_eigh::name, linalg_eigh::overload_name)
      .typed<linalg_eigh::schema>();
}

// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eigh::call(const at::Tensor & self, c10::string_view UPLO) {
    
    static auto op = create_linalg_eigh_typed_handle();
    return op.call(self, UPLO);
}

// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eigh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO) {
    
    static auto op = create_linalg_eigh_typed_handle();
    return op.redispatch(dispatchKeySet, self, UPLO);
}

// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigh_eigvals::schema> create_linalg_eigh_eigvals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_eigh_eigvals::name, linalg_eigh_eigvals::overload_name)
      .typed<linalg_eigh_eigvals::schema>();
}

// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_eigvals::call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
    
    static auto op = create_linalg_eigh_eigvals_typed_handle();
    return op.call(self, UPLO, eigvals, eigvecs);
}

// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_eigvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
    
    static auto op = create_linalg_eigh_eigvals_typed_handle();
    return op.redispatch(dispatchKeySet, self, UPLO, eigvals, eigvecs);
}

// @generated dispatcher stubs for aten::linalg_eigvalsh (functional and out=
// overloads). ::call starts a fresh dispatch; ::redispatch continues with an
// explicit DispatchKeySet; the typed handle is resolved once (C10_NOINLINE
// creator) and cached in a function-local static.
// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvalsh::schema> create_linalg_eigvalsh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_eigvalsh::name, linalg_eigvalsh::overload_name)
      .typed<linalg_eigvalsh::schema>();
}

// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
at::Tensor linalg_eigvalsh::call(const at::Tensor & self, c10::string_view UPLO) {
    
    static auto op = create_linalg_eigvalsh_typed_handle();
    return op.call(self, UPLO);
}

// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
at::Tensor linalg_eigvalsh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO) {
    
    static auto op = create_linalg_eigvalsh_typed_handle();
    return op.redispatch(dispatchKeySet, self, UPLO);
}

// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvalsh_out::schema> create_linalg_eigvalsh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_eigvalsh_out::name, linalg_eigvalsh_out::overload_name)
      .typed<linalg_eigvalsh_out::schema>();
}

// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_eigvalsh_out::call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
    
    static auto op = create_linalg_eigvalsh_out_typed_handle();
    return op.call(self, UPLO, out);
}

// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_eigvalsh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
    
    static auto op = create_linalg_eigvalsh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, UPLO, out);
}

// @generated dispatcher stubs for aten::linalg_householder_product (functional and
// out= overloads). ::call starts a fresh dispatch; ::redispatch continues with an
// explicit DispatchKeySet; the typed handle is resolved once (C10_NOINLINE
// creator) and cached in a function-local static.
// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_householder_product::schema> create_linalg_householder_product_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_householder_product::name, linalg_householder_product::overload_name)
      .typed<linalg_householder_product::schema>();
}

// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
at::Tensor linalg_householder_product::call(const at::Tensor & input, const at::Tensor & tau) {
    
    static auto op = create_linalg_householder_product_typed_handle();
    return op.call(input, tau);
}

// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
at::Tensor linalg_householder_product::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau) {
    
    static auto op = create_linalg_householder_product_typed_handle();
    return op.redispatch(dispatchKeySet, input, tau);
}

// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_householder_product_out::schema> create_linalg_householder_product_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_householder_product_out::name, linalg_householder_product_out::overload_name)
      .typed<linalg_householder_product_out::schema>();
}

// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_householder_product_out::call(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
    
    static auto op = create_linalg_householder_product_out_typed_handle();
    return op.call(input, tau, out);
}

// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_householder_product_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
    
    static auto op = create_linalg_householder_product_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, tau, out);
}

// @generated dispatcher stubs for aten::linalg_matrix_norm (scalar-ord functional
// and out= overloads). ::call starts a fresh dispatch; ::redispatch continues with
// an explicit DispatchKeySet; the typed handle is resolved once (C10_NOINLINE
// creator) and cached in a function-local static.
// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm::schema> create_linalg_matrix_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm::name, linalg_matrix_norm::overload_name)
      .typed<linalg_matrix_norm::schema>();
}

// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_matrix_norm::call(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_matrix_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm_out::schema> create_linalg_matrix_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm_out::name, linalg_matrix_norm_out::overload_name)
      .typed<linalg_matrix_norm_out::schema>();
}

// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_norm_out::call(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// @generated dispatcher stubs for aten::linalg_matrix_norm (string-ord
// .str_ord and .str_ord_out overloads). ::call starts a fresh dispatch;
// ::redispatch continues with an explicit DispatchKeySet; the typed handle is
// resolved once (C10_NOINLINE creator) and cached in a function-local static.
// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm_str_ord::schema> create_linalg_matrix_norm_str_ord_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm_str_ord::name, linalg_matrix_norm_str_ord::overload_name)
      .typed<linalg_matrix_norm_str_ord::schema>();
}

// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_matrix_norm_str_ord::call(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_str_ord_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_matrix_norm_str_ord::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_str_ord_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm_str_ord_out::schema> create_linalg_matrix_norm_str_ord_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm_str_ord_out::name, linalg_matrix_norm_str_ord_out::overload_name)
      .typed<linalg_matrix_norm_str_ord_out::schema>();
}

// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_norm_str_ord_out::call(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_str_ord_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_norm_str_ord_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_str_ord_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// @generated dispatcher stubs for aten::linalg_svd (functional overload returning
// (U, S, Vh) and the .U out= overload). ::call starts a fresh dispatch;
// ::redispatch continues with an explicit DispatchKeySet; the typed handle is
// resolved once (C10_NOINLINE creator) and cached in a function-local static.
// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svd::schema> create_linalg_svd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svd::name, linalg_svd::overload_name)
      .typed<linalg_svd::schema>();
}

// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd::call(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_svd_typed_handle();
    return op.call(A, full_matrices, driver);
}

// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_svd_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, driver);
}

// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svd_U::schema> create_linalg_svd_U_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svd_U::name, linalg_svd_U::overload_name)
      .typed<linalg_svd_U::schema>();
}

// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_U::call(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    
    static auto op = create_linalg_svd_U_typed_handle();
    return op.call(A, full_matrices, driver, U, S, Vh);
}

// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_U::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    
    static auto op = create_linalg_svd_U_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, driver, U, S, Vh);
}

// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
// Lazily resolves the typed operator handle for aten::_test_optional_floatlist;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_floatlist::schema> create__test_optional_floatlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_floatlist::name, _test_optional_floatlist::overload_name)
      .typed<_test_optional_floatlist::schema>();
}

// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor _test_optional_floatlist::call(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends) {

    static auto op = create__test_optional_floatlist_typed_handle();
    return op.call(values, addends);
}

// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor _test_optional_floatlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends) {

    static auto op = create__test_optional_floatlist_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends);
}

// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
// Lazily resolves the typed operator handle for aten::unflatten_dense_tensors;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unflatten_dense_tensors::schema> create_unflatten_dense_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unflatten_dense_tensors::name, unflatten_dense_tensors::overload_name)
      .typed<unflatten_dense_tensors::schema>();
}

// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
// Entry point: dispatches through a cached (function-local static) operator handle.
::std::vector<at::Tensor> unflatten_dense_tensors::call(const at::Tensor & flat, at::TensorList tensors) {

    static auto op = create_unflatten_dense_tensors_typed_handle();
    return op.call(flat, tensors);
}

// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
::std::vector<at::Tensor> unflatten_dense_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & flat, at::TensorList tensors) {

    static auto op = create_unflatten_dense_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, flat, tensors);
}

// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed operator handle for aten::_nested_tensor_from_tensor_list;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_tensor_list::schema> create__nested_tensor_from_tensor_list_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_from_tensor_list::name, _nested_tensor_from_tensor_list::overload_name)
      .typed<_nested_tensor_from_tensor_list::schema>();
}

// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor _nested_tensor_from_tensor_list::call(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {

    static auto op = create__nested_tensor_from_tensor_list_typed_handle();
    return op.call(list, dtype, layout, device, pin_memory);
}

// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor _nested_tensor_from_tensor_list::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {

    static auto op = create__nested_tensor_from_tensor_list_typed_handle();
    return op.redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory);
}

// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
// Lazily resolves the typed operator handle for aten::_sparse_broadcast_to_copy;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_broadcast_to_copy::schema> create__sparse_broadcast_to_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_broadcast_to_copy::name, _sparse_broadcast_to_copy::overload_name)
      .typed<_sparse_broadcast_to_copy::schema>();
}

// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor _sparse_broadcast_to_copy::call(const at::Tensor & self, at::IntArrayRef size) {

    static auto op = create__sparse_broadcast_to_copy_typed_handle();
    return op.call(self, size);
}

// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor _sparse_broadcast_to_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {

    static auto op = create__sparse_broadcast_to_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
// Lazily resolves the typed operator handle for aten::transpose_copy.int;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<transpose_copy_int::schema> create_transpose_copy_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(transpose_copy_int::name, transpose_copy_int::overload_name)
      .typed<transpose_copy_int::schema>();
}

// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor transpose_copy_int::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create_transpose_copy_int_typed_handle();
    return op.call(self, dim0, dim1);
}

// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor transpose_copy_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {

    static auto op = create_transpose_copy_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::_indices_copy(Tensor self) -> Tensor
// Lazily resolves the typed operator handle for aten::_indices_copy;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_indices_copy::schema> create__indices_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_indices_copy::name, _indices_copy::overload_name)
      .typed<_indices_copy::schema>();
}

// aten::_indices_copy(Tensor self) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor _indices_copy::call(const at::Tensor & self) {

    static auto op = create__indices_copy_typed_handle();
    return op.call(self);
}

// aten::_indices_copy(Tensor self) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor _indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__indices_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_values_copy(Tensor self) -> Tensor
// Lazily resolves the typed operator handle for aten::_values_copy;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_values_copy::schema> create__values_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_values_copy::name, _values_copy::overload_name)
      .typed<_values_copy::schema>();
}

// aten::_values_copy(Tensor self) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor _values_copy::call(const at::Tensor & self) {

    static auto op = create__values_copy_typed_handle();
    return op.call(self);
}

// aten::_values_copy(Tensor self) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor _values_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create__values_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::values_copy(Tensor self) -> Tensor
// Lazily resolves the typed operator handle for aten::values_copy;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<values_copy::schema> create_values_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(values_copy::name, values_copy::overload_name)
      .typed<values_copy::schema>();
}

// aten::values_copy(Tensor self) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor values_copy::call(const at::Tensor & self) {

    static auto op = create_values_copy_typed_handle();
    return op.call(self);
}

// aten::values_copy(Tensor self) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor values_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_values_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
// Lazily resolves the typed operator handle for aten::view_copy;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<view_copy::schema> create_view_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_copy::name, view_copy::overload_name)
      .typed<view_copy::schema>();
}

// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor view_copy::call(const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create_view_copy_typed_handle();
    return op.call(self, size);
}

// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor view_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {

    static auto op = create_view_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
// Lazily resolves the typed operator handle for aten::view_copy.dtype;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<view_copy_dtype::schema> create_view_copy_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_copy_dtype::name, view_copy_dtype::overload_name)
      .typed<view_copy_dtype::schema>();
}

// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor view_copy_dtype::call(const at::Tensor & self, at::ScalarType dtype) {

    static auto op = create_view_copy_dtype_typed_handle();
    return op.call(self, dtype);
}

// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor view_copy_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {

    static auto op = create_view_copy_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
// Lazily resolves the typed operator handle for aten::unfold_copy;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unfold_copy::schema> create_unfold_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold_copy::name, unfold_copy::overload_name)
      .typed<unfold_copy::schema>();
}

// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor unfold_copy::call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {

    static auto op = create_unfold_copy_typed_handle();
    return op.call(self, dimension, size, step);
}

// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor unfold_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {

    static auto op = create_unfold_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dimension, size, step);
}

// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
// Lazily resolves the typed operator handle for aten::scaled_dot_product_attention;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<scaled_dot_product_attention::schema> create_scaled_dot_product_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scaled_dot_product_attention::name, scaled_dot_product_attention::overload_name)
      .typed<scaled_dot_product_attention::schema>();
}

// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor scaled_dot_product_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {

    static auto op = create_scaled_dot_product_attention_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}

// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor scaled_dot_product_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {

    static auto op = create_scaled_dot_product_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}

// aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)
// Lazily resolves the typed operator handle for this backward op; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_fused_attention_overrideable_backward::schema> create__scaled_dot_product_fused_attention_overrideable_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_fused_attention_overrideable_backward::name, _scaled_dot_product_fused_attention_overrideable_backward::overload_name)
      .typed<_scaled_dot_product_fused_attention_overrideable_backward::schema>();
}

// aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)
// Entry point: dispatches through a cached (function-local static) operator handle.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {

    static auto op = create__scaled_dot_product_fused_attention_overrideable_backward_typed_handle();
    return op.call(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
}

// aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {

    static auto op = create__scaled_dot_product_fused_attention_overrideable_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
}

// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Lazily resolves the typed operator handle for aten::_scaled_dot_product_cudnn_attention;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_cudnn_attention::schema> create__scaled_dot_product_cudnn_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_cudnn_attention::name, _scaled_dot_product_cudnn_attention::overload_name)
      .typed<_scaled_dot_product_cudnn_attention::schema>();
}

// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Entry point: dispatches through a cached (function-local static) operator handle.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {

    static auto op = create__scaled_dot_product_cudnn_attention_typed_handle();
    return op.call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {

    static auto op = create__scaled_dot_product_cudnn_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
// Lazily resolves the typed operator handle for this backward op; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_cudnn_attention_backward::schema> create__scaled_dot_product_cudnn_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_cudnn_attention_backward::name, _scaled_dot_product_cudnn_attention_backward::overload_name)
      .typed<_scaled_dot_product_cudnn_attention_backward::schema>();
}

// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
// Entry point: dispatches through a cached (function-local static) operator handle.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale) {

    static auto op = create__scaled_dot_product_cudnn_attention_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
}

// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale) {

    static auto op = create__scaled_dot_product_cudnn_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
}

// aten::special_bessel_y1(Tensor self) -> Tensor
// Lazily resolves the typed operator handle for aten::special_bessel_y1;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y1::schema> create_special_bessel_y1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_y1::name, special_bessel_y1::overload_name)
      .typed<special_bessel_y1::schema>();
}

// aten::special_bessel_y1(Tensor self) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor special_bessel_y1::call(const at::Tensor & self) {

    static auto op = create_special_bessel_y1_typed_handle();
    return op.call(self);
}

// aten::special_bessel_y1(Tensor self) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor special_bessel_y1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {

    static auto op = create_special_bessel_y1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for aten::special_bessel_y1.out;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y1_out::schema> create_special_bessel_y1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_bessel_y1_out::name, special_bessel_y1_out::overload_name)
      .typed<special_bessel_y1_out::schema>();
}

// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor & special_bessel_y1_out::call(const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_y1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor & special_bessel_y1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {

    static auto op = create_special_bessel_y1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
// Lazily resolves the typed operator handle for aten::special_laguerre_polynomial_l;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l::schema> create_special_laguerre_polynomial_l_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l::name, special_laguerre_polynomial_l::overload_name)
      .typed<special_laguerre_polynomial_l::schema>();
}

// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor special_laguerre_polynomial_l::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_laguerre_polynomial_l_typed_handle();
    return op.call(x, n);
}

// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor special_laguerre_polynomial_l::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_laguerre_polynomial_l_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
// Lazily resolves the typed operator handle for the x_scalar overload;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_x_scalar::schema> create_special_laguerre_polynomial_l_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_x_scalar::name, special_laguerre_polynomial_l_x_scalar::overload_name)
      .typed<special_laguerre_polynomial_l_x_scalar::schema>();
}

// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor special_laguerre_polynomial_l_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_laguerre_polynomial_l_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor special_laguerre_polynomial_l_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {

    static auto op = create_special_laguerre_polynomial_l_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
// Lazily resolves the typed operator handle for the n_scalar overload;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_n_scalar::schema> create_special_laguerre_polynomial_l_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_n_scalar::name, special_laguerre_polynomial_l_n_scalar::overload_name)
      .typed<special_laguerre_polynomial_l_n_scalar::schema>();
}

// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor special_laguerre_polynomial_l_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_laguerre_polynomial_l_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor special_laguerre_polynomial_l_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {

    static auto op = create_special_laguerre_polynomial_l_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for the out= overload;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_out::schema> create_special_laguerre_polynomial_l_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_out::name, special_laguerre_polynomial_l_out::overload_name)
      .typed<special_laguerre_polynomial_l_out::schema>();
}

// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor & special_laguerre_polynomial_l_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_laguerre_polynomial_l_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor & special_laguerre_polynomial_l_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_laguerre_polynomial_l_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for the x_scalar_out overload;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_x_scalar_out::schema> create_special_laguerre_polynomial_l_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_x_scalar_out::name, special_laguerre_polynomial_l_x_scalar_out::overload_name)
      .typed<special_laguerre_polynomial_l_x_scalar_out::schema>();
}

// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor & special_laguerre_polynomial_l_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_laguerre_polynomial_l_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor & special_laguerre_polynomial_l_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {

    static auto op = create_special_laguerre_polynomial_l_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for the n_scalar_out overload;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_n_scalar_out::schema> create_special_laguerre_polynomial_l_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_n_scalar_out::name, special_laguerre_polynomial_l_n_scalar_out::overload_name)
      .typed<special_laguerre_polynomial_l_n_scalar_out::schema>();
}

// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor & special_laguerre_polynomial_l_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_laguerre_polynomial_l_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor & special_laguerre_polynomial_l_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {

    static auto op = create_special_laguerre_polynomial_l_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
// Lazily resolves the typed operator handle for aten::special_legendre_polynomial_p;
// throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p::schema> create_special_legendre_polynomial_p_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p::name, special_legendre_polynomial_p::overload_name)
      .typed<special_legendre_polynomial_p::schema>();
}

// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
// Entry point: dispatches through a cached (function-local static) operator handle.
at::Tensor special_legendre_polynomial_p::call(const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_legendre_polynomial_p_typed_handle();
    return op.call(x, n);
}

// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
// Redispatch variant: forwards the caller-supplied DispatchKeySet back into the dispatcher.
at::Tensor special_legendre_polynomial_p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {

    static auto op = create_special_legendre_polynomial_p_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_x_scalar::schema> create_special_legendre_polynomial_p_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_x_scalar::name, special_legendre_polynomial_p_x_scalar::overload_name)
      .typed<special_legendre_polynomial_p_x_scalar::schema>();
}

// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_legendre_polynomial_p_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_legendre_polynomial_p_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_legendre_polynomial_p_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_legendre_polynomial_p_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_n_scalar::schema> create_special_legendre_polynomial_p_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_n_scalar::name, special_legendre_polynomial_p_n_scalar::overload_name)
      .typed<special_legendre_polynomial_p_n_scalar::schema>();
}

// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_legendre_polynomial_p_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_legendre_polynomial_p_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_legendre_polynomial_p_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_legendre_polynomial_p_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_out::schema> create_special_legendre_polynomial_p_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_out::name, special_legendre_polynomial_p_out::overload_name)
      .typed<special_legendre_polynomial_p_out::schema>();
}

// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_legendre_polynomial_p_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_legendre_polynomial_p_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_x_scalar_out::schema> create_special_legendre_polynomial_p_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_x_scalar_out::name, special_legendre_polynomial_p_x_scalar_out::overload_name)
      .typed<special_legendre_polynomial_p_x_scalar_out::schema>();
}

// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_legendre_polynomial_p_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_legendre_polynomial_p_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_n_scalar_out::schema> create_special_legendre_polynomial_p_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_n_scalar_out::name, special_legendre_polynomial_p_n_scalar_out::overload_name)
      .typed<special_legendre_polynomial_p_n_scalar_out::schema>();
}

// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_legendre_polynomial_p_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_legendre_polynomial_p_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k0::schema> create_special_scaled_modified_bessel_k0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_scaled_modified_bessel_k0::name, special_scaled_modified_bessel_k0::overload_name)
      .typed<special_scaled_modified_bessel_k0::schema>();
}

// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
at::Tensor special_scaled_modified_bessel_k0::call(const at::Tensor & x) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_scaled_modified_bessel_k0_typed_handle();
    return op.call(x);
}

// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
at::Tensor special_scaled_modified_bessel_k0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_scaled_modified_bessel_k0_typed_handle();
    return op.redispatch(dispatchKeySet, x);
}

// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k0_out::schema> create_special_scaled_modified_bessel_k0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_scaled_modified_bessel_k0_out::name, special_scaled_modified_bessel_k0_out::overload_name)
      .typed<special_scaled_modified_bessel_k0_out::schema>();
}

// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_scaled_modified_bessel_k0_out::call(const at::Tensor & x, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_scaled_modified_bessel_k0_out_typed_handle();
    return op.call(x, out);
}

// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_scaled_modified_bessel_k0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_scaled_modified_bessel_k0_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, out);
}

// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u::schema> create_special_shifted_chebyshev_polynomial_u_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u::name, special_shifted_chebyshev_polynomial_u::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u::call(const at::Tensor & x, const at::Tensor & n) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_shifted_chebyshev_polynomial_u_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_u_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_x_scalar::schema> create_special_shifted_chebyshev_polynomial_u_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_x_scalar::name, special_shifted_chebyshev_polynomial_u_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_n_scalar::schema> create_special_shifted_chebyshev_polynomial_u_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_n_scalar::name, special_shifted_chebyshev_polynomial_u_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_out::schema> create_special_shifted_chebyshev_polynomial_u_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_out::name, special_shifted_chebyshev_polynomial_u_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_shifted_chebyshev_polynomial_u_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_u_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_u_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_x_scalar_out::name, special_shifted_chebyshev_polynomial_u_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_u_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_n_scalar_out::name, special_shifted_chebyshev_polynomial_u_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_::schema> create__fused_adam__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adam_::name, _fused_adam_::overload_name)
      .typed<_fused_adam_::schema>();
}

// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam_::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__fused_adam__typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__fused_adam__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam__tensor_lr::schema> create__fused_adam__tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adam__tensor_lr::name, _fused_adam__tensor_lr::overload_name)
      .typed<_fused_adam__tensor_lr::schema>();
}

// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam__tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__fused_adam__tensor_lr_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam__tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__fused_adam__tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_::schema> create__fused_sgd__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_::name, _fused_sgd_::overload_name)
      .typed<_fused_sgd_::schema>();
}

// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd_::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__fused_sgd__typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__fused_sgd__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd__tensor_lr::schema> create__fused_sgd__tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd__tensor_lr::name, _fused_sgd__tensor_lr::overload_name)
      .typed<_fused_sgd__tensor_lr::schema>();
}

// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd__tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__fused_sgd__tensor_lr_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd__tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__fused_sgd__tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_propagate_xla_data(Tensor input, Tensor output) -> ()
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_propagate_xla_data::schema> create__propagate_xla_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_propagate_xla_data::name, _propagate_xla_data::overload_name)
      .typed<_propagate_xla_data::schema>();
}

// aten::_propagate_xla_data(Tensor input, Tensor output) -> ()
void _propagate_xla_data::call(const at::Tensor & input, const at::Tensor & output) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__propagate_xla_data_typed_handle();
    return op.call(input, output);
}

// aten::_propagate_xla_data(Tensor input, Tensor output) -> ()
void _propagate_xla_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & output) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__propagate_xla_data_typed_handle();
    return op.redispatch(dispatchKeySet, input, output);
}

// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_new_zeros_with_same_feature_meta_out::schema> create__new_zeros_with_same_feature_meta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_new_zeros_with_same_feature_meta_out::name, _new_zeros_with_same_feature_meta_out::overload_name)
      .typed<_new_zeros_with_same_feature_meta_out::schema>();
}

// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _new_zeros_with_same_feature_meta_out::call(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__new_zeros_with_same_feature_meta_out_typed_handle();
    return op.call(self, other, self_num_batch_dims, out);
}

// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _new_zeros_with_same_feature_meta_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__new_zeros_with_same_feature_meta_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, self_num_batch_dims, out);
}

// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_init_dropout_state_out::schema> create__cudnn_init_dropout_state_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_init_dropout_state_out::name, _cudnn_init_dropout_state_out::overload_name)
      .typed<_cudnn_init_dropout_state_out::schema>();
}

// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cudnn_init_dropout_state_out::call(double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create__cudnn_init_dropout_state_out_typed_handle();
    return op.call(dropout, train, dropout_seed, out);
}

// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cudnn_init_dropout_state_out::redispatch(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create__cudnn_init_dropout_state_out_typed_handle();
    return op.redispatch(dispatchKeySet, dropout, train, dropout_seed, out);
}

// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout_out::schema> create_native_dropout_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_dropout_out::name, native_dropout_out::overload_name)
      .typed<native_dropout_out::schema>();
}

// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out::call(const at::Tensor & input, double p, ::std::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_native_dropout_out_typed_handle();
    return op.call(input, p, train, out0, out1);
}

// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_native_dropout_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, p, train, out0, out1);
}

// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<add_Scalar_out::schema> create_add_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add_Scalar_out::name, add_Scalar_out::overload_name)
      .typed<add_Scalar_out::schema>();
}

// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & add_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_add_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & add_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_add_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this op (schema lookup + typed cast).
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_Tensor_out::schema> create_bernoulli_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bernoulli_Tensor_out::name, bernoulli_Tensor_out::overload_name)
      .typed<bernoulli_Tensor_out::schema>();
}

// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_Tensor_out::call(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    // Handle is looked up once and cached (thread-safe function-local static).
    static auto op = create_bernoulli_Tensor_out_typed_handle();
    return op.call(self, p, generator, out);
}

// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    // Continues dispatch using the caller-provided dispatchKeySet.
    static auto op = create_bernoulli_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out);
}

// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_Tensor::schema> create_bernoulli_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bernoulli_Tensor::name, bernoulli_Tensor::overload_name)
      .typed<bernoulli_Tensor::schema>();
}

// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli_Tensor::call(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_bernoulli_Tensor_typed_handle();
    return op.call(self, p, generator);
}

// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_bernoulli_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_float_out::schema> create_bernoulli_float_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bernoulli_float_out::name, bernoulli_float_out::overload_name)
      .typed<bernoulli_float_out::schema>();
}

// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_float_out::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_bernoulli_float_out_typed_handle();
    return op.call(self, p, generator, out);
}

// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_bernoulli_float_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out);
}

// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_with_logits_out::schema> create_binary_cross_entropy_with_logits_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binary_cross_entropy_with_logits_out::name, binary_cross_entropy_with_logits_out::overload_name)
      .typed<binary_cross_entropy_with_logits_out::schema>();
}

// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binary_cross_entropy_with_logits_out::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_binary_cross_entropy_with_logits_out_typed_handle();
    return op.call(self, target, weight, pos_weight, reduction, out);
}

// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binary_cross_entropy_with_logits_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_binary_cross_entropy_with_logits_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction, out);
}

// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<bincount_out::schema> create_bincount_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bincount_out::name, bincount_out::overload_name)
      .typed<bincount_out::schema>();
}

// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bincount_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_bincount_out_typed_handle();
    return op.call(self, weights, minlength, out);
}

// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bincount_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_bincount_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weights, minlength, out);
}

// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<block_diag_out::schema> create_block_diag_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(block_diag_out::name, block_diag_out::overload_name)
      .typed<block_diag_out::schema>();
}

// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & block_diag_out::call(at::TensorList tensors, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_block_diag_out_typed_handle();
    return op.call(tensors, out);
}

// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & block_diag_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_block_diag_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero_dim_IntList_out::schema> create_count_nonzero_dim_IntList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(count_nonzero_dim_IntList_out::name, count_nonzero_dim_IntList_out::overload_name)
      .typed<count_nonzero_dim_IntList_out::schema>();
}

// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & count_nonzero_dim_IntList_out::call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_count_nonzero_dim_IntList_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & count_nonzero_dim_IntList_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_count_nonzero_dim_IntList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero_out::schema> create_count_nonzero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(count_nonzero_out::name, count_nonzero_out::overload_name)
      .typed<count_nonzero_out::schema>();
}

// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & count_nonzero_out::call(const at::Tensor & self, ::std::optional<int64_t> dim, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_count_nonzero_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & count_nonzero_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_count_nonzero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_add_relu_out::schema> create_cudnn_convolution_add_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_add_relu_out::name, cudnn_convolution_add_relu_out::overload_name)
      .typed<cudnn_convolution_add_relu_out::schema>();
}

// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_add_relu_out::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_cudnn_convolution_add_relu_out_typed_handle();
    return op.call(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}

// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_add_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_cudnn_convolution_add_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}

// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_backward_out::schema> create__ctc_loss_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_ctc_loss_backward_out::name, _ctc_loss_backward_out::overload_name)
      .typed<_ctc_loss_backward_out::schema>();
}

// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _ctc_loss_backward_out::call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create__ctc_loss_backward_out_typed_handle();
    return op.call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}

// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _ctc_loss_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create__ctc_loss_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}

// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_backward_out::schema> create_diagonal_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_backward_out::name, diagonal_backward_out::overload_name)
      .typed<diagonal_backward_out::schema>();
}

// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_backward_out::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_diagonal_backward_out_typed_handle();
    return op.call(grad_output, input_sizes, offset, dim1, dim2, out);
}

// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_diagonal_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2, out);
}

// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_renorm_out::schema> create_embedding_renorm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_renorm_out::name, embedding_renorm_out::overload_name)
      .typed<embedding_renorm_out::schema>();
}

// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_renorm_out::call(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_embedding_renorm_out_typed_handle();
    return op.call(self, indices, max_norm, norm_type, out);
}

// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_renorm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_embedding_renorm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, max_norm, norm_type, out);
}

// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_renorm::schema> create_embedding_renorm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_renorm::name, embedding_renorm::overload_name)
      .typed<embedding_renorm::schema>();
}

// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
at::Tensor embedding_renorm::call(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_embedding_renorm_typed_handle();
    return op.call(self, indices, max_norm, norm_type);
}

// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
at::Tensor embedding_renorm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_embedding_renorm_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, max_norm, norm_type);
}

// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_per_sample_weights_backward_out::schema> create__embedding_bag_per_sample_weights_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_per_sample_weights_backward_out::name, _embedding_bag_per_sample_weights_backward_out::overload_name)
      .typed<_embedding_bag_per_sample_weights_backward_out::schema>();
}

// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_per_sample_weights_backward_out::call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create__embedding_bag_per_sample_weights_backward_out_typed_handle();
    return op.call(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}

// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_per_sample_weights_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create__embedding_bag_per_sample_weights_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}

// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_names_out::schema> create_empty_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_names_out::name, empty_names_out::overload_name)
      .typed<empty_names_out::schema>();
}

// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_names_out::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_empty_names_out_typed_handle();
    return op.call(size, names, memory_format, out);
}

// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_empty_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, memory_format, out);
}

// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<new_empty_strided_out::schema> create_new_empty_strided_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty_strided_out::name, new_empty_strided_out::overload_name)
      .typed<new_empty_strided_out::schema>();
}

// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_strided_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_new_empty_strided_out_typed_handle();
    return op.call(self, size, stride, out);
}

// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_strided_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_new_empty_strided_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, out);
}

// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<new_full_out::schema> create_new_full_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_full_out::name, new_full_out::overload_name)
      .typed<new_full_out::schema>();
}

// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_full_out::call(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_new_full_out_typed_handle();
    return op.call(self, size, fill_value, out);
}

// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_full_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_new_full_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value, out);
}

// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<new_ones_out::schema> create_new_ones_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_ones_out::name, new_ones_out::overload_name)
      .typed<new_ones_out::schema>();
}

// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_ones_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_new_ones_out_typed_handle();
    return op.call(self, size, out);
}

// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_ones_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_new_ones_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}

// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<_empty_per_channel_affine_quantized_out::schema> create__empty_per_channel_affine_quantized_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_per_channel_affine_quantized_out::name, _empty_per_channel_affine_quantized_out::overload_name)
      .typed<_empty_per_channel_affine_quantized_out::schema>();
}

// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_per_channel_affine_quantized_out::call(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create__empty_per_channel_affine_quantized_out_typed_handle();
    return op.call(size, scales, zero_points, axis, memory_format, out);
}

// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_per_channel_affine_quantized_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create__empty_per_channel_affine_quantized_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, scales, zero_points, axis, memory_format, out);
}

// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_quantized_out::schema> create_empty_quantized_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_quantized_out::name, empty_quantized_out::overload_name)
      .typed<empty_quantized_out::schema>();
}

// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_quantized_out::call(at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_empty_quantized_out_typed_handle();
    return op.call(size, qtensor, memory_format, out);
}

// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_quantized_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_empty_quantized_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, qtensor, memory_format, out);
}

// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<empty_strided_out::schema> create_empty_strided_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_strided_out::name, empty_strided_out::overload_name)
      .typed<empty_strided_out::schema>();
}

// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_strided_out::call(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_empty_strided_out_typed_handle();
    return op.call(size, stride, out);
}

// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_strided_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_empty_strided_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, stride, out);
}

// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<from_file_out::schema> create_from_file_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(from_file_out::name, from_file_out::overload_name)
      .typed<from_file_out::schema>();
}

// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & from_file_out::call(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_from_file_out_typed_handle();
    return op.call(filename, shared, size, out);
}

// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & from_file_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_from_file_out_typed_handle();
    return op.redispatch(dispatchKeySet, filename, shared, size, out);
}

// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this op's schema in the c10 Dispatcher singleton and returns a typed
// handle; C10_NOINLINE keeps this cold lookup path out of the trampolines below.
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm_backward_out::schema> create_native_layer_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_layer_norm_backward_out::name, native_layer_norm_backward_out::overload_name)
      .typed<native_layer_norm_backward_out::schema>();
}

// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out::call(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed handle lookup is done once and cached in a function-local static.
    static auto op = create_native_layer_norm_backward_out_typed_handle();
    return op.call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}

// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Same cached handle; forwards the caller-provided dispatchKeySet explicitly.
    static auto op = create_native_layer_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}

// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for mkldnn_max_pool3d.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d_out::schema> create_mkldnn_max_pool3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mkldnn_max_pool3d_out::name, mkldnn_max_pool3d_out::overload_name).typed<mkldnn_max_pool3d_out::schema>();
}

// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_mkldnn_max_pool3d_out_typed_handle();
    return handle.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_mkldnn_max_pool3d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for quantized_max_pool3d.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool3d_out::schema> create_quantized_max_pool3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantized_max_pool3d_out::name, quantized_max_pool3d_out::overload_name).typed<quantized_max_pool3d_out::schema>();
}

// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_max_pool3d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_quantized_max_pool3d_out_typed_handle();
    return handle.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantized_max_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_quantized_max_pool3d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Look up the registered schema for mps_convolution_backward.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_backward_out::schema> create_mps_convolution_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mps_convolution_backward_out::name, mps_convolution_backward_out::overload_name).typed<mps_convolution_backward_out::schema>();
}

// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_mps_convolution_backward_out_typed_handle();
    return handle.call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}

// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_mps_convolution_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}

// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
// Look up the registered schema for mkldnn_rnn_layer.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer_out::schema> create_mkldnn_rnn_layer_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mkldnn_rnn_layer_out::name, mkldnn_rnn_layer_out::overload_name).typed<mkldnn_rnn_layer_out::schema>();
}

// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out::call(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_mkldnn_rnn_layer_out_typed_handle();
    return handle.call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}

// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_mkldnn_rnn_layer_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}

// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for miopen_convolution.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_out::schema> create_miopen_convolution_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_convolution_out::name, miopen_convolution_out::overload_name).typed<miopen_convolution_out::schema>();
}

// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & miopen_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_miopen_convolution_out_typed_handle();
    return handle.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & miopen_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_miopen_convolution_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
// Look up the registered schema for miopen_rnn.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn_out::schema> create_miopen_rnn_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_rnn_out::name, miopen_rnn_out::overload_name).typed<miopen_rnn_out::schema>();
}

// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_miopen_rnn_out_typed_handle();
    return handle.call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_miopen_rnn_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for _sparse_sparse_matmul.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sparse_matmul_out::schema> create__sparse_sparse_matmul_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sparse_matmul_out::name, _sparse_sparse_matmul_out::overload_name).typed<_sparse_sparse_matmul_out::schema>();
}

// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sparse_matmul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__sparse_sparse_matmul_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sparse_matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create__sparse_sparse_matmul_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
// Look up the registered schema for _native_batch_norm_legit_functional and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_functional::schema> create__native_batch_norm_legit_functional_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_native_batch_norm_legit_functional::name, _native_batch_norm_legit_functional::overload_name).typed<_native_batch_norm_legit_functional::schema>();
}

// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__native_batch_norm_legit_functional_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create__native_batch_norm_legit_functional_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Look up the registered schema for batch_norm_update_stats.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_update_stats_out::schema> create_batch_norm_update_stats_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_update_stats_out::name, batch_norm_update_stats_out::overload_name).typed<batch_norm_update_stats_out::schema>();
}

// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_batch_norm_update_stats_out_typed_handle();
    return handle.call(input, running_mean, running_var, momentum, out0, out1);
}

// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_batch_norm_update_stats_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, running_mean, running_var, momentum, out0, out1);
}

// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for ones_like.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ones_like_out::schema> create_ones_like_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ones_like_out::name, ones_like_out::overload_name).typed<ones_like_out::schema>();
}

// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_ones_like_out_typed_handle();
    return handle.call(self, memory_format, out);
}

// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_ones_like_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for _euclidean_dist.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_euclidean_dist_out::schema> create__euclidean_dist_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_euclidean_dist_out::name, _euclidean_dist_out::overload_name).typed<_euclidean_dist_out::schema>();
}

// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _euclidean_dist_out::call(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__euclidean_dist_out_typed_handle();
    return handle.call(x1, x2, out);
}

// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _euclidean_dist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create__euclidean_dist_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x1, x2, out);
}

// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for _cdist_backward.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_backward_out::schema> create__cdist_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cdist_backward_out::name, _cdist_backward_out::overload_name).typed<_cdist_backward_out::schema>();
}

// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_backward_out::call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__cdist_backward_out_typed_handle();
    return handle.call(grad, x1, x2, p, cdist, out);
}

// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create__cdist_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, x1, x2, p, cdist, out);
}

// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for _pdist_forward.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_forward_out::schema> create__pdist_forward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_pdist_forward_out::name, _pdist_forward_out::overload_name).typed<_pdist_forward_out::schema>();
}

// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_forward_out::call(const at::Tensor & self, double p, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__pdist_forward_out_typed_handle();
    return handle.call(self, p, out);
}

// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_forward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create__pdist_forward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, out);
}

// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for scalar_tensor.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<scalar_tensor_out::schema> create_scalar_tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(scalar_tensor_out::name, scalar_tensor_out::overload_name).typed<scalar_tensor_out::schema>();
}

// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scalar_tensor_out::call(const at::Scalar & s, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_scalar_tensor_out_typed_handle();
    return handle.call(s, out);
}

// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scalar_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_scalar_tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, s, out);
}

// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for rand.names_out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_names_out::schema> create_rand_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rand_names_out::name, rand_names_out::overload_name).typed<rand_names_out::schema>();
}

// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_names_out::call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_rand_names_out_typed_handle();
    return handle.call(size, names, out);
}

// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_rand_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, size, names, out);
}

// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for rand.generator_with_names_out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator_with_names_out::schema> create_rand_generator_with_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rand_generator_with_names_out::name, rand_generator_with_names_out::overload_name).typed<rand_generator_with_names_out::schema>();
}

// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_generator_with_names_out::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_rand_generator_with_names_out_typed_handle();
    return handle.call(size, generator, names, out);
}

// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_generator_with_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_rand_generator_with_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, size, generator, names, out);
}

// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for randn_like.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<randn_like_out::schema> create_randn_like_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(randn_like_out::name, randn_like_out::overload_name).typed<randn_like_out::schema>();
}

// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_randn_like_out_typed_handle();
    return handle.call(self, memory_format, out);
}

// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_randn_like_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for repeat.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<repeat_out::schema> create_repeat_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(repeat_out::name, repeat_out::overload_name).typed<repeat_out::schema>();
}

// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_out::call(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_repeat_out_typed_handle();
    return handle.call(self, repeats, out);
}

// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_repeat_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, repeats, out);
}

// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for repeat_interleave.Tensor_out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_Tensor_out::schema> create_repeat_interleave_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(repeat_interleave_Tensor_out::name, repeat_interleave_Tensor_out::overload_name).typed<repeat_interleave_Tensor_out::schema>();
}

// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_interleave_Tensor_out::call(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_repeat_interleave_Tensor_out_typed_handle();
    return handle.call(repeats, output_size, out);
}

// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_interleave_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_repeat_interleave_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, repeats, output_size, out);
}

// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for _mkldnn_reshape.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_reshape_out::schema> create__mkldnn_reshape_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_mkldnn_reshape_out::name, _mkldnn_reshape_out::overload_name).typed<_mkldnn_reshape_out::schema>();
}

// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mkldnn_reshape_out::call(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__mkldnn_reshape_out_typed_handle();
    return handle.call(self, shape, out);
}

// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mkldnn_reshape_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create__mkldnn_reshape_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, shape, out);
}

// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Look up the registered schema for sum.out and return a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sum_out::schema> create_sum_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_out::name, sum_out::overload_name).typed<sum_out::schema>();
}

// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_sum_out_typed_handle();
    return handle.call(self, dtype, out);
}

// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Forward to the dispatcher using the caller-supplied dispatch key set.
    static auto handle = create_sum_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, out);
}

// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for rot90.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rot90_out::schema> create_rot90_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rot90_out::name, rot90_out::overload_name).typed<rot90_out::schema>();
}

// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rot90_out::call(const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_rot90_out_typed_handle();
    return handle.call(self, k, dims, out);
}

// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rot90_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
    static auto handle = create_rot90_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, k, dims, out);
}

// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _nested_tensor_strides.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_strides_out::schema> create__nested_tensor_strides_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_strides_out::name, _nested_tensor_strides_out::overload_name).typed<_nested_tensor_strides_out::schema>();
}

// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_strides_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__nested_tensor_strides_out_typed_handle();
    return handle.call(self, out);
}

// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_strides_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__nested_tensor_strides_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _nested_tensor_storage_offsets.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_storage_offsets_out::schema> create__nested_tensor_storage_offsets_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_storage_offsets_out::name, _nested_tensor_storage_offsets_out::overload_name).typed<_nested_tensor_storage_offsets_out::schema>();
}

// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_storage_offsets_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__nested_tensor_storage_offsets_out_typed_handle();
    return handle.call(self, out);
}

// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_storage_offsets_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__nested_tensor_storage_offsets_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves the dispatcher entry for var_mean.correction_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_correction_out::schema> create_var_mean_correction_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_correction_out::name, var_mean_correction_out::overload_name).typed<var_mean_correction_out::schema>();
}

// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> var_mean_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_var_mean_correction_out_typed_handle();
    return handle.call(self, dim, correction, keepdim, out0, out1);
}

// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> var_mean_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    static auto handle = create_var_mean_correction_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1);
}

// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _standard_gamma_grad.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma_grad_out::schema> create__standard_gamma_grad_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_standard_gamma_grad_out::name, _standard_gamma_grad_out::overload_name).typed<_standard_gamma_grad_out::schema>();
}

// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_grad_out::call(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__standard_gamma_grad_out_typed_handle();
    return handle.call(self, output, out);
}

// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_grad_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
    static auto handle = create__standard_gamma_grad_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, output, out);
}

// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for native_norm.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<native_norm_out::schema> create_native_norm_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_norm_out::name, native_norm_out::overload_name).typed<native_norm_out::schema>();
}

// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_out::call(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_native_norm_out_typed_handle();
    return handle.call(self, p, out);
}

// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    static auto handle = create_native_norm_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, out);
}

// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for native_norm.ScalarOpt_dim_dtype_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<native_norm_ScalarOpt_dim_dtype_out::schema> create_native_norm_ScalarOpt_dim_dtype_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_norm_ScalarOpt_dim_dtype_out::name, native_norm_ScalarOpt_dim_dtype_out::overload_name).typed<native_norm_ScalarOpt_dim_dtype_out::schema>();
}

// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_ScalarOpt_dim_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_native_norm_ScalarOpt_dim_dtype_out_typed_handle();
    return handle.call(self, p, dim, keepdim, dtype, out);
}

// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_ScalarOpt_dim_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto handle = create_native_norm_ScalarOpt_dim_dtype_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
}

// aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
// Resolves the dispatcher entry for _batch_norm_with_update_functional and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_with_update_functional::schema> create__batch_norm_with_update_functional_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_with_update_functional::name, _batch_norm_with_update_functional::overload_name).typed<_batch_norm_with_update_functional::schema>();
}

// aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_functional::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__batch_norm_with_update_functional_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
    static auto handle = create__batch_norm_with_update_functional_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _sparse_sum_backward.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_backward_out::schema> create__sparse_sum_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum_backward_out::name, _sparse_sum_backward_out::overload_name).typed<_sparse_sum_backward_out::schema>();
}

// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_backward_out::call(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__sparse_sum_backward_out_typed_handle();
    return handle.call(grad, self, dim, out);
}

// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    static auto handle = create__sparse_sum_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, self, dim, out);
}

// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _sparse_csr_sum.dim_dtype_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_sum_dim_dtype_out::schema> create__sparse_csr_sum_dim_dtype_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_csr_sum_dim_dtype_out::name, _sparse_csr_sum_dim_dtype_out::overload_name).typed<_sparse_csr_sum_dim_dtype_out::schema>();
}

// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_sum_dim_dtype_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__sparse_csr_sum_dim_dtype_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_sum_dim_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    static auto handle = create__sparse_csr_sum_dim_dtype_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _sparse_softmax.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_out::schema> create__sparse_softmax_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax_out::name, _sparse_softmax_out::overload_name).typed<_sparse_softmax_out::schema>();
}

// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__sparse_softmax_out_typed_handle();
    return handle.call(self, dim, half_to_float, out);
}

// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    static auto handle = create__sparse_softmax_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for norm.ScalarOpt_dtype_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dtype_out::schema> create_norm_ScalarOpt_dtype_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dtype_out::name, norm_ScalarOpt_dtype_out::overload_name).typed<norm_ScalarOpt_dtype_out::schema>();
}

// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_ScalarOpt_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_norm_ScalarOpt_dtype_out_typed_handle();
    return handle.call(self, p, dtype, out);
}

// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_ScalarOpt_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
    static auto handle = create_norm_ScalarOpt_dtype_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, dtype, out);
}

// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for norm.Scalar_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<norm_Scalar_out::schema> create_norm_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_Scalar_out::name, norm_Scalar_out::overload_name).typed<norm_Scalar_out::schema>();
}

// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_Scalar_out::call(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_norm_Scalar_out_typed_handle();
    return handle.call(self, p, out);
}

// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & norm_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    static auto handle = create_norm_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, out);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _sparse_coo_tensor_with_dims_and_tensors.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims_and_tensors_out::schema> create__sparse_coo_tensor_with_dims_and_tensors_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_coo_tensor_with_dims_and_tensors_out::name, _sparse_coo_tensor_with_dims_and_tensors_out::overload_name).typed<_sparse_coo_tensor_with_dims_and_tensors_out::schema>();
}

// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out::call(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__sparse_coo_tensor_with_dims_and_tensors_out_typed_handle();
    return handle.call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
    static auto handle = create__sparse_coo_tensor_with_dims_and_tensors_out_typed_handle();
    return handle.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
}

// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _sparse_mask_projection.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mask_projection_out::schema> create__sparse_mask_projection_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_mask_projection_out::name, _sparse_mask_projection_out::overload_name).typed<_sparse_mask_projection_out::schema>();
}

// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_mask_projection_out::call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__sparse_mask_projection_out_typed_handle();
    return handle.call(self, mask, accumulate_matches, out);
}

// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_mask_projection_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
    static auto handle = create__sparse_mask_projection_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, accumulate_matches, out);
}

// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _to_dense.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_to_dense_out::schema> create__to_dense_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_to_dense_out::name, _to_dense_out::overload_name).typed<_to_dense_out::schema>();
}

// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_dense_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__to_dense_out_typed_handle();
    return handle.call(self, dtype, masked_grad, out);
}

// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_dense_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad, at::Tensor & out) {
    static auto handle = create__to_dense_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, masked_grad, out);
}

// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for _coalesced.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesced_out::schema> create__coalesced_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_coalesced_out::name, _coalesced_out::overload_name).typed<_coalesced_out::schema>();
}

// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _coalesced_out::call(const at::Tensor & self, bool coalesced, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__coalesced_out_typed_handle();
    return handle.call(self, coalesced, out);
}

// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _coalesced_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out) {
    static auto handle = create__coalesced_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, coalesced, out);
}

// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
// Resolves the dispatcher entry for _coalesced and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesced::schema> create__coalesced_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_coalesced::name, _coalesced::overload_name).typed<_coalesced::schema>();
}

// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
at::Tensor _coalesced::call(const at::Tensor & self, bool coalesced) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__coalesced_typed_handle();
    return handle.call(self, coalesced);
}

// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
at::Tensor _coalesced::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced) {
    static auto handle = create__coalesced_typed_handle();
    return handle.redispatch(dispatchKeySet, self, coalesced);
}

// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for quantize_per_tensor_dynamic.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_dynamic_out::schema> create_quantize_per_tensor_dynamic_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_dynamic_out::name, quantize_per_tensor_dynamic_out::overload_name).typed<quantize_per_tensor_dynamic_out::schema>();
}

// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_dynamic_out::call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_quantize_per_tensor_dynamic_out_typed_handle();
    return handle.call(self, dtype, reduce_range, out);
}

// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_dynamic_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
    static auto handle = create_quantize_per_tensor_dynamic_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, reduce_range, out);
}

// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for quantize_per_tensor.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_out::schema> create_quantize_per_tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_out::name, quantize_per_tensor_out::overload_name).typed<quantize_per_tensor_out::schema>();
}

// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_out::call(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_quantize_per_tensor_out_typed_handle();
    return handle.call(self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
    static auto handle = create_quantize_per_tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for quantize_per_tensor.tensor_qparams_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensor_qparams_out::schema> create_quantize_per_tensor_tensor_qparams_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_tensor_qparams_out::name, quantize_per_tensor_tensor_qparams_out::overload_name).typed<quantize_per_tensor_tensor_qparams_out::schema>();
}

// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_tensor_qparams_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_quantize_per_tensor_tensor_qparams_out_typed_handle();
    return handle.call(self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_tensor_qparams_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
    static auto handle = create_quantize_per_tensor_tensor_qparams_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
// Resolves the dispatcher entry for quantize_per_tensor.tensors_out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensors_out::schema> create_quantize_per_tensor_tensors_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantize_per_tensor_tensors_out::name, quantize_per_tensor_tensors_out::overload_name).typed<quantize_per_tensor_tensors_out::schema>();
}

// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
void quantize_per_tensor_tensors_out::call(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_quantize_per_tensor_tensors_out_typed_handle();
    handle.call(tensors, scales, zero_points, dtype, out);
}

// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
void quantize_per_tensor_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
    static auto handle = create_quantize_per_tensor_tensors_out_typed_handle();
    handle.redispatch(dispatchKeySet, tensors, scales, zero_points, dtype, out);
}

// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves the dispatcher entry for fake_quantize_per_tensor_affine_cachemask.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_cachemask_out::schema> create_fake_quantize_per_tensor_affine_cachemask_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fake_quantize_per_tensor_affine_cachemask_out::name, fake_quantize_per_tensor_affine_cachemask_out::overload_name).typed<fake_quantize_per_tensor_affine_cachemask_out::schema>();
}

// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out::call(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create_fake_quantize_per_tensor_affine_cachemask_out_typed_handle();
    return handle.call(self, scale, zero_point, quant_min, quant_max, out0, out1);
}

// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    static auto handle = create_fake_quantize_per_tensor_affine_cachemask_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves the dispatcher entry for _fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::schema> create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::name, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::overload_name).typed<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::schema>();
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is created once and cached for all subsequent calls.
    static auto handle = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_typed_handle();
    return handle.call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    static auto handle = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_tensor_affine_out::schema> create__fake_quantize_learnable_per_tensor_affine_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fake_quantize_learnable_per_tensor_affine_out::name, _fake_quantize_learnable_per_tensor_affine_out::overload_name)
      .typed<_fake_quantize_learnable_per_tensor_affine_out::schema>();
}

// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_tensor_affine_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__fake_quantize_learnable_per_tensor_affine_out_typed_handle();
    return handle.call(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}

// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_tensor_affine_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__fake_quantize_learnable_per_tensor_affine_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}

// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell_backward_out::schema> create__thnn_fused_gru_cell_backward_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_thnn_fused_gru_cell_backward_out::name, _thnn_fused_gru_cell_backward_out::overload_name)
      .typed<_thnn_fused_gru_cell_backward_out::schema>();
}

// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out::call(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__thnn_fused_gru_cell_backward_out_typed_handle();
    return handle.call(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}

// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__thnn_fused_gru_cell_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}

// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage_out::schema> create_set_source_Storage_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_source_Storage_out::name, set_source_Storage_out::overload_name)
      .typed<set_source_Storage_out::schema>();
}

// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_source_Storage_out::call(const at::Tensor & self, at::Storage source, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_source_Storage_out_typed_handle();
    return handle.call(self, source, out);
}

// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_source_Storage_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_source_Storage_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, out);
}

// aten::set.source_Storage(Tensor self, Storage source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage::schema> create_set_source_Storage_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_source_Storage::name, set_source_Storage::overload_name)
      .typed<set_source_Storage::schema>();
}

// aten::set.source_Storage(Tensor self, Storage source) -> Tensor
at::Tensor set_source_Storage::call(const at::Tensor & self, at::Storage source) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_source_Storage_typed_handle();
    return handle.call(self, source);
}

// aten::set.source_Storage(Tensor self, Storage source) -> Tensor
at::Tensor set_source_Storage::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_source_Storage_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source);
}

// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage_storage_offset_out::schema> create_set_source_Storage_storage_offset_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_source_Storage_storage_offset_out::name, set_source_Storage_storage_offset_out::overload_name)
      .typed<set_source_Storage_storage_offset_out::schema>();
}

// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_source_Storage_storage_offset_out::call(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_source_Storage_storage_offset_out_typed_handle();
    return handle.call(self, source, storage_offset, size, stride, out);
}

// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_source_Storage_storage_offset_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_source_Storage_storage_offset_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, storage_offset, size, stride, out);
}

// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage_storage_offset::schema> create_set_source_Storage_storage_offset_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_source_Storage_storage_offset::name, set_source_Storage_storage_offset::overload_name)
      .typed<set_source_Storage_storage_offset::schema>();
}

// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
at::Tensor set_source_Storage_storage_offset::call(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_source_Storage_storage_offset_typed_handle();
    return handle.call(self, source, storage_offset, size, stride);
}

// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
at::Tensor set_source_Storage_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_source_Storage_storage_offset_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
}

// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Tensor_out::schema> create_set_source_Tensor_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_source_Tensor_out::name, set_source_Tensor_out::overload_name)
      .typed<set_source_Tensor_out::schema>();
}

// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_source_Tensor_out::call(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_source_Tensor_out_typed_handle();
    return handle.call(self, source, out);
}

// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_source_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_source_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, out);
}

// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Tensor::schema> create_set_source_Tensor_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_source_Tensor::name, set_source_Tensor::overload_name)
      .typed<set_source_Tensor::schema>();
}

// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
at::Tensor set_source_Tensor::call(const at::Tensor & self, const at::Tensor & source) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_source_Tensor_typed_handle();
    return handle.call(self, source);
}

// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
at::Tensor set_source_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_source_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source);
}

// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set_out::schema> create_set_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_out::name, set_out::overload_name)
      .typed<set_out::schema>();
}

// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_out_typed_handle();
    return handle.call(self, out);
}

// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & set_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::set(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<set::schema> create_set_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set::name, set::overload_name)
      .typed<set::schema>();
}

// aten::set(Tensor self) -> Tensor
at::Tensor set::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_set_typed_handle();
    return handle.call(self);
}

// aten::set(Tensor self) -> Tensor
at::Tensor set::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_set_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<put_out::schema> create_put_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(put_out::name, put_out::overload_name)
      .typed<put_out::schema>();
}

// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & put_out::call(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_put_out_typed_handle();
    return handle.call(self, index, source, accumulate, out);
}

// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & put_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_put_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, index, source, accumulate, out);
}

// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<uniform_out::schema> create_uniform_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(uniform_out::name, uniform_out::overload_name)
      .typed<uniform_out::schema>();
}

// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & uniform_out::call(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_uniform_out_typed_handle();
    return handle.call(self, from, to, generator, out);
}

// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & uniform_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_uniform_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, from, to, generator, out);
}

// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<uniform::schema> create_uniform_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(uniform::name, uniform::overload_name)
      .typed<uniform::schema>();
}

// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
at::Tensor uniform::call(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_uniform_typed_handle();
    return handle.call(self, from, to, generator);
}

// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
at::Tensor uniform::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_uniform_typed_handle();
    return handle.redispatch(dispatchKeySet, self, from, to, generator);
}

// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tril_indices_out::schema> create_tril_indices_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(tril_indices_out::name, tril_indices_out::overload_name)
      .typed<tril_indices_out::schema>();
}

// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_indices_out::call(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create_tril_indices_out_typed_handle();
    return handle.call(row, col, offset, out);
}

// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create_tril_indices_out_typed_handle();
    return handle.redispatch(dispatchKeySet, row, col, offset, out);
}

// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_cholesky_solve_helper_out::schema> create__cholesky_solve_helper_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cholesky_solve_helper_out::name, _cholesky_solve_helper_out::overload_name)
      .typed<_cholesky_solve_helper_out::schema>();
}

// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cholesky_solve_helper_out::call(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__cholesky_solve_helper_out_typed_handle();
    return handle.call(self, A, upper, out);
}

// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cholesky_solve_helper_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__cholesky_solve_helper_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, A, upper, out);
}

// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_amp_update_scale_out::schema> create__amp_update_scale_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_amp_update_scale_out::name, _amp_update_scale_out::overload_name)
      .typed<_amp_update_scale_out::schema>();
}

// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _amp_update_scale_out::call(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__amp_update_scale_out_typed_handle();
    return handle.call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}

// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _amp_update_scale_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__amp_update_scale_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}

// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
static C10_NOINLINE c10::TypedOperatorHandle<_amp_update_scale::schema> create__amp_update_scale_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_amp_update_scale::name, _amp_update_scale::overload_name)
      .typed<_amp_update_scale::schema>();
}

// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
::std::tuple<at::Tensor,at::Tensor> _amp_update_scale::call(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__amp_update_scale_typed_handle();
    return handle.call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
::std::tuple<at::Tensor,at::Tensor> _amp_update_scale::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__amp_update_scale_typed_handle();
    return handle.redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Scalar_out::schema> create__foreach_addcdiv_Scalar_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_addcdiv_Scalar_out::name, _foreach_addcdiv_Scalar_out::overload_name)
      .typed<_foreach_addcdiv_Scalar_out::schema>();
}

// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Scalar_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_addcdiv_Scalar_out_typed_handle();
    return handle.call(self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_addcdiv_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_ScalarList_out::schema> create__foreach_addcdiv_ScalarList_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_addcdiv_ScalarList_out::name, _foreach_addcdiv_ScalarList_out::overload_name)
      .typed<_foreach_addcdiv_ScalarList_out::schema>();
}

// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_ScalarList_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_addcdiv_ScalarList_out_typed_handle();
    return handle.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_addcdiv_ScalarList_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Tensor_out::schema> create__foreach_addcdiv_Tensor_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_addcdiv_Tensor_out::name, _foreach_addcdiv_Tensor_out::overload_name)
      .typed<_foreach_addcdiv_Tensor_out::schema>();
}

// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Tensor_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_addcdiv_Tensor_out_typed_handle();
    return handle.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_addcdiv_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_exp_out::schema> create__foreach_exp_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_exp_out::name, _foreach_exp_out::overload_name)
      .typed<_foreach_exp_out::schema>();
}

// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_exp_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_exp_out_typed_handle();
    return handle.call(self, out);
}

// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_exp_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_exp_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log_out::schema> create__foreach_log_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_log_out::name, _foreach_log_out::overload_name)
      .typed<_foreach_log_out::schema>();
}

// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_log_out_typed_handle();
    return handle.call(self, out);
}

// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_log_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log1p_out::schema> create__foreach_log1p_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_log1p_out::name, _foreach_log1p_out::overload_name)
      .typed<_foreach_log1p_out::schema>();
}

// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log1p_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_log1p_out_typed_handle();
    return handle.call(self, out);
}

// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log1p_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_log1p_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_neg_out::schema> create__foreach_neg_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_neg_out::name, _foreach_neg_out::overload_name)
      .typed<_foreach_neg_out::schema>();
}

// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_neg_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_neg_out_typed_handle();
    return handle.call(self, out);
}

// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_neg_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_neg_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_norm_Scalar_out::schema> create__foreach_norm_Scalar_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_norm_Scalar_out::name, _foreach_norm_Scalar_out::overload_name)
      .typed<_foreach_norm_Scalar_out::schema>();
}

// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
void _foreach_norm_Scalar_out::call(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_norm_Scalar_out_typed_handle();
    return handle.call(self, ord, dtype, out);
}

// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
void _foreach_norm_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_norm_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, ord, dtype, out);
}

// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_List_out::schema> create__foreach_pow_List_out_typed_handle() {
  // Look up the operator by (name, overload) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foreach_pow_List_out::name, _foreach_pow_List_out::overload_name)
      .typed<_foreach_pow_List_out::schema>();
}

// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_List_out::call(at::TensorList self, at::TensorList exponent, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto handle = create__foreach_pow_List_out_typed_handle();
    return handle.call(self, exponent, out);
}

// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent, at::TensorList out) {
    // Re-enter the dispatcher using the caller-supplied key set.
    static auto handle = create__foreach_pow_List_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent, out);
}

// Dispatcher plumbing for
// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_Scalar_out::schema> create__foreach_pow_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow_Scalar_out::name, _foreach_pow_Scalar_out::overload_name)
                   .typed<_foreach_pow_Scalar_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_pow_Scalar_out::call(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
    static auto handle = create__foreach_pow_Scalar_out_typed_handle();
    handle.call(self, exponent, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_pow_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
    static auto handle = create__foreach_pow_Scalar_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, exponent, out);
}

// Dispatcher plumbing for
// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_ScalarList_out::schema> create__foreach_pow_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow_ScalarList_out::name, _foreach_pow_ScalarList_out::overload_name)
                   .typed<_foreach_pow_ScalarList_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_pow_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
    static auto handle = create__foreach_pow_ScalarList_out_typed_handle();
    handle.call(self, exponent, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_pow_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
    static auto handle = create__foreach_pow_ScalarList_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, exponent, out);
}

// Dispatcher plumbing for
// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_reciprocal_out::schema> create__foreach_reciprocal_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_reciprocal_out::name, _foreach_reciprocal_out::overload_name)
                   .typed<_foreach_reciprocal_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_reciprocal_out::call(at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_reciprocal_out_typed_handle();
    handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_reciprocal_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_reciprocal_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::_foreach_rsqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_rsqrt_out::schema> create__foreach_rsqrt_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_rsqrt_out::name, _foreach_rsqrt_out::overload_name)
                   .typed<_foreach_rsqrt_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_rsqrt_out::call(at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_rsqrt_out_typed_handle();
    handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_rsqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_rsqrt_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sigmoid_out::schema> create__foreach_sigmoid_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sigmoid_out::name, _foreach_sigmoid_out::overload_name)
                   .typed<_foreach_sigmoid_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_sigmoid_out::call(at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_sigmoid_out_typed_handle();
    handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_sigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_sigmoid_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sin_out::schema> create__foreach_sin_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sin_out::name, _foreach_sin_out::overload_name)
                   .typed<_foreach_sin_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_sin_out::call(at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_sin_out_typed_handle();
    handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_sin_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_sin_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sqrt_out::schema> create__foreach_sqrt_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sqrt_out::name, _foreach_sqrt_out::overload_name)
                   .typed<_foreach_sqrt_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _foreach_sqrt_out::call(at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_sqrt_out_typed_handle();
    handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _foreach_sqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    static auto handle = create__foreach_sqrt_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<glu_jvp_out::schema> create_glu_jvp_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(glu_jvp_out::name, glu_jvp_out::overload_name)
                   .typed<glu_jvp_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & glu_jvp_out::call(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    static auto handle = create_glu_jvp_out_typed_handle();
    return handle.call(glu, x, dx, dim, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & glu_jvp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    static auto handle = create_glu_jvp_out_typed_handle();
    return handle.redispatch(dispatchKeySet, glu, x, dx, dim, out);
}

// Dispatcher plumbing for
// aten::rrelu_with_noise_functional(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> (Tensor, Tensor noise_out)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_functional::schema> create_rrelu_with_noise_functional_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rrelu_with_noise_functional::name, rrelu_with_noise_functional::overload_name)
                   .typed<rrelu_with_noise_functional::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
::std::tuple<at::Tensor,at::Tensor> rrelu_with_noise_functional::call(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    static auto handle = create_rrelu_with_noise_functional_typed_handle();
    return handle.call(self, noise, lower, upper, training, generator);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
::std::tuple<at::Tensor,at::Tensor> rrelu_with_noise_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    static auto handle = create_rrelu_with_noise_functional_typed_handle();
    return handle.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator);
}

// Dispatcher plumbing for
// aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d_backward_out::schema> create__adaptive_avg_pool2d_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_adaptive_avg_pool2d_backward_out::name, _adaptive_avg_pool2d_backward_out::overload_name)
                   .typed<_adaptive_avg_pool2d_backward_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & _adaptive_avg_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__adaptive_avg_pool2d_backward_out_typed_handle();
    return handle.call(grad_output, self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & _adaptive_avg_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__adaptive_avg_pool2d_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, out);
}

// Dispatcher plumbing for
// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated3d_out::schema> create_slow_conv_dilated3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv_dilated3d_out::name, slow_conv_dilated3d_out::overload_name)
                   .typed<slow_conv_dilated3d_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & slow_conv_dilated3d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    static auto handle = create_slow_conv_dilated3d_out_typed_handle();
    return handle.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & slow_conv_dilated3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    static auto handle = create_slow_conv_dilated3d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// Dispatcher plumbing for
// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<isinf_out::schema> create_isinf_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(isinf_out::name, isinf_out::overload_name)
                   .typed<isinf_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & isinf_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_isinf_out_typed_handle();
    return handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & isinf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_isinf_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_floatlist_out::schema> create__test_optional_floatlist_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_optional_floatlist_out::name, _test_optional_floatlist_out::overload_name)
                   .typed<_test_optional_floatlist_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & _test_optional_floatlist_out::call(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
    static auto handle = create__test_optional_floatlist_out_typed_handle();
    return handle.call(values, addends, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & _test_optional_floatlist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
    static auto handle = create__test_optional_floatlist_out_typed_handle();
    return handle.redispatch(dispatchKeySet, values, addends, out);
}

// Dispatcher plumbing for
// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_tensor_list_out::schema> create__nested_tensor_from_tensor_list_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_from_tensor_list_out::name, _nested_tensor_from_tensor_list_out::overload_name)
                   .typed<_nested_tensor_from_tensor_list_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & _nested_tensor_from_tensor_list_out::call(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out) {
    static auto handle = create__nested_tensor_from_tensor_list_out_typed_handle();
    return handle.call(list, dtype, layout, device, pin_memory, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & _nested_tensor_from_tensor_list_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out) {
    static auto handle = create__nested_tensor_from_tensor_list_out_typed_handle();
    return handle.redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory, out);
}

// Dispatcher plumbing for
// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_broadcast_to_copy_out::schema> create__sparse_broadcast_to_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_broadcast_to_copy_out::name, _sparse_broadcast_to_copy_out::overload_name)
                   .typed<_sparse_broadcast_to_copy_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & _sparse_broadcast_to_copy_out::call(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    static auto handle = create__sparse_broadcast_to_copy_out_typed_handle();
    return handle.call(self, size, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & _sparse_broadcast_to_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    static auto handle = create__sparse_broadcast_to_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, out);
}

// Dispatcher plumbing for
// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<transpose_copy_int_out::schema> create_transpose_copy_int_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(transpose_copy_int_out::name, transpose_copy_int_out::overload_name)
                   .typed<transpose_copy_int_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & transpose_copy_int_out::call(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
    static auto handle = create_transpose_copy_int_out_typed_handle();
    return handle.call(self, dim0, dim1, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & transpose_copy_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
    static auto handle = create_transpose_copy_int_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim0, dim1, out);
}

// Dispatcher plumbing for
// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_indices_copy_out::schema> create__indices_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_indices_copy_out::name, _indices_copy_out::overload_name)
                   .typed<_indices_copy_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & _indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__indices_copy_out_typed_handle();
    return handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & _indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__indices_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_values_copy_out::schema> create__values_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_values_copy_out::name, _values_copy_out::overload_name)
                   .typed<_values_copy_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & _values_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__values_copy_out_typed_handle();
    return handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & _values_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create__values_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<values_copy_out::schema> create_values_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(values_copy_out::name, values_copy_out::overload_name)
                   .typed<values_copy_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & values_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_values_copy_out_typed_handle();
    return handle.call(self, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & values_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_values_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// Dispatcher plumbing for
// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<view_copy_out::schema> create_view_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(view_copy_out::name, view_copy_out::overload_name)
                   .typed<view_copy_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & view_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    static auto handle = create_view_copy_out_typed_handle();
    return handle.call(self, size, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & view_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    static auto handle = create_view_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, out);
}

// Dispatcher plumbing for
// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<view_copy_dtype_out::schema> create_view_copy_dtype_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(view_copy_dtype_out::name, view_copy_dtype_out::overload_name)
                   .typed<view_copy_dtype_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & view_copy_dtype_out::call(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
    static auto handle = create_view_copy_dtype_out_typed_handle();
    return handle.call(self, dtype, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & view_copy_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
    static auto handle = create_view_copy_dtype_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, out);
}

// Dispatcher plumbing for
// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<unfold_copy_out::schema> create_unfold_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unfold_copy_out::name, unfold_copy_out::overload_name)
                   .typed<unfold_copy_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
at::Tensor & unfold_copy_out::call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
    static auto handle = create_unfold_copy_out_typed_handle();
    return handle.call(self, dimension, size, step, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
at::Tensor & unfold_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
    static auto handle = create_unfold_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dimension, size, step, out);
}

// Dispatcher plumbing for
// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_out::schema> create__fused_adam_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fused_adam_out::name, _fused_adam_out::overload_name)
                   .typed<_fused_adam_out::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
void _fused_adam_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    static auto handle = create__fused_adam_out_typed_handle();
    handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
void _fused_adam_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    static auto handle = create__fused_adam_out_typed_handle();
    handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// Dispatcher plumbing for
// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)

// Resolve the operator schema by (name, overload_name) and produce a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam::schema> create__fused_adam_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fused_adam::name, _fused_adam::overload_name)
                   .typed<_fused_adam::schema>();
}

// Entry point: full dispatch through a lazily-initialized, cached handle.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    static auto handle = create__fused_adam_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// Entry point: re-dispatch with an explicit DispatchKeySet supplied by the caller.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    static auto handle = create__fused_adam_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_tensor_lr_out::schema> create__fused_adam_tensor_lr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adam_tensor_lr_out::name, _fused_adam_tensor_lr_out::overload_name)
      .typed<_fused_adam_tensor_lr_out::schema>();
}

// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Top-level dispatcher entry for the out-variant with a Tensor learning rate
// (per the schema, `out` and several inputs are mutated in place; returns
// void). Forwards all arguments to the cached typed handle's call().
void _fused_adam_tensor_lr_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_adam_tensor_lr_out_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Redispatch variant of the entry above: identical forwarding, but passes the
// caller-provided DispatchKeySet so dispatch resumes from that key set.
void _fused_adam_tensor_lr_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_adam_tensor_lr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
// Resolves the typed operator handle for the functional tensor_lr overload of
// _fused_adam from the Dispatcher singleton; C10_NOINLINE keeps this one-time
// lookup out of the call/redispatch bodies that cache it in a static.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_tensor_lr::schema> create__fused_adam_tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adam_tensor_lr::name, _fused_adam_tensor_lr::overload_name)
      .typed<_fused_adam_tensor_lr::schema>();
}

// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
// Top-level dispatcher entry for the functional tensor_lr overload: returns
// the five output tensor lists as a tuple. Handle is resolved lazily and
// cached in a function-local static (thread-safe one-time init).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_adam_tensor_lr_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
// Redispatch variant: same forwarding as call(), but dispatch resumes from
// the caller-provided DispatchKeySet.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_adam_tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Resolves the typed operator handle for _fused_sgd.out from the Dispatcher
// singleton (throws if the schema is not registered). C10_NOINLINE keeps this
// one-time lookup out of the call/redispatch bodies that cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_out::schema> create__fused_sgd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_out::name, _fused_sgd_out::overload_name)
      .typed<_fused_sgd_out::schema>();
}

// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Top-level dispatcher entry for the _fused_sgd out-variant (per the schema,
// `out`, grads and momentum buffers are mutated in place; returns void).
// Forwards all arguments to the cached typed handle's call().
void _fused_sgd_out::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_sgd_out_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Redispatch variant: same forwarding as call(), but dispatch resumes from
// the caller-provided DispatchKeySet.
void _fused_sgd_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_sgd_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Resolves the typed operator handle for the functional _fused_sgd overload
// from the Dispatcher singleton; C10_NOINLINE keeps this one-time lookup out
// of the call/redispatch bodies that cache it in a static.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd::schema> create__fused_sgd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd::name, _fused_sgd::overload_name)
      .typed<_fused_sgd::schema>();
}

// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Top-level dispatcher entry for the functional _fused_sgd overload: returns
// the three output tensor lists as a tuple. Handle is resolved lazily and
// cached in a function-local static (thread-safe one-time init).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_sgd_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Redispatch variant: same forwarding as call(), but dispatch resumes from
// the caller-provided DispatchKeySet.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_sgd_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Resolves the typed operator handle for _fused_sgd.tensor_lr_out from the
// Dispatcher singleton (throws if unregistered); C10_NOINLINE keeps this
// one-time lookup out of the call/redispatch bodies that cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_tensor_lr_out::schema> create__fused_sgd_tensor_lr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_tensor_lr_out::name, _fused_sgd_tensor_lr_out::overload_name)
      .typed<_fused_sgd_tensor_lr_out::schema>();
}

// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Top-level dispatcher entry for the out-variant with a Tensor learning rate
// (per the schema, `out` and several inputs are mutated in place; returns
// void). Forwards all arguments to the cached typed handle's call().
void _fused_sgd_tensor_lr_out::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_sgd_tensor_lr_out_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Redispatch variant: same forwarding as call(), but dispatch resumes from
// the caller-provided DispatchKeySet.
void _fused_sgd_tensor_lr_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    
    static auto op = create__fused_sgd_tensor_lr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Resolves the typed operator handle for the functional tensor_lr overload of
// _fused_sgd from the Dispatcher singleton; C10_NOINLINE keeps this one-time
// lookup out of the call/redispatch bodies that cache it in a static.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_tensor_lr::schema> create__fused_sgd_tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_tensor_lr::name, _fused_sgd_tensor_lr::overload_name)
      .typed<_fused_sgd_tensor_lr::schema>();
}

// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Top-level dispatcher entry for the functional tensor_lr overload: returns
// the three output tensor lists as a tuple. Handle is resolved lazily and
// cached in a function-local static (thread-safe one-time init).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_sgd_tensor_lr_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Redispatch variant: same forwarding as call(), but dispatch resumes from
// the caller-provided DispatchKeySet.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_sgd_tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

}} // namespace at::_ops
