#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>

// @generated by torchgen/gen.py from Operators.cpp
// NOTE See [Sharded File] comment in VariableType

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/_cast_Byte.h>
#include <ATen/ops/_cast_Char.h>
#include <ATen/ops/_cast_Double.h>
#include <ATen/ops/_cast_Float.h>
#include <ATen/ops/_cast_Int.h>
#include <ATen/ops/_cast_Long.h>
#include <ATen/ops/_cast_Short.h>
#include <ATen/ops/_cast_Half.h>
#include <ATen/ops/_backward.h>
#include <ATen/ops/set_data.h>
#include <ATen/ops/data.h>
#include <ATen/ops/is_leaf.h>
#include <ATen/ops/output_nr.h>
#include <ATen/ops/_version.h>
#include <ATen/ops/requires_grad.h>
#include <ATen/ops/retain_grad.h>
#include <ATen/ops/retains_grad.h>
#include <ATen/ops/_fw_primal.h>
#include <ATen/ops/_make_dual.h>
#include <ATen/ops/_unpack_dual.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta.h>
#include <ATen/ops/_has_same_storage_numel.h>
#include <ATen/ops/rename.h>
#include <ATen/ops/rename.h>
#include <ATen/ops/align_to.h>
#include <ATen/ops/align_to.h>
#include <ATen/ops/align_as.h>
#include <ATen/ops/align_tensors.h>
#include <ATen/ops/_assert_async.h>
#include <ATen/ops/_assert_async.h>
#include <ATen/ops/_assert_scalar.h>
#include <ATen/ops/_functional_assert_scalar.h>
#include <ATen/ops/_functional_assert_async.h>
#include <ATen/ops/_assert_tensor_metadata.h>
#include <ATen/ops/_print.h>
#include <ATen/ops/sym_constrain_range.h>
#include <ATen/ops/sym_constrain_range_for_size.h>
#include <ATen/ops/_functional_sym_constrain_range.h>
#include <ATen/ops/_functional_sym_constrain_range_for_size.h>
#include <ATen/ops/_make_dep_token.h>
#include <ATen/ops/refine_names.h>
#include <ATen/ops/_use_cudnn_ctc_loss.h>
#include <ATen/ops/_use_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_cudnn_rnn.h>
#include <ATen/ops/_cudnn_rnn_backward.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/_debug_has_internal_overlap.h>
#include <ATen/ops/_fused_dropout.h>
#include <ATen/ops/_masked_scale.h>
#include <ATen/ops/native_dropout.h>
#include <ATen/ops/native_dropout_backward.h>
#include <ATen/ops/_sobol_engine_draw.h>
#include <ATen/ops/_sobol_engine_ff.h>
#include <ATen/ops/_sobol_engine_scramble.h>
#include <ATen/ops/_sobol_engine_initialize_state.h>
#include <ATen/ops/_reshape_from_tensor.h>
#include <ATen/ops/_shape_as_tensor.h>
#include <ATen/ops/dropout.h>
#include <ATen/ops/dropout.h>
#include <ATen/ops/feature_dropout.h>
#include <ATen/ops/feature_dropout.h>
#include <ATen/ops/alpha_dropout.h>
#include <ATen/ops/alpha_dropout.h>
#include <ATen/ops/feature_alpha_dropout.h>
#include <ATen/ops/feature_alpha_dropout.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/abs.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/absolute.h>
#include <ATen/ops/angle.h>
#include <ATen/ops/angle.h>
#include <ATen/ops/view_as_real.h>
#include <ATen/ops/view_as_complex.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/sgn.h>
#include <ATen/ops/chalf.h>
#include <ATen/ops/real.h>
#include <ATen/ops/imag.h>
#include <ATen/ops/_conj.h>
#include <ATen/ops/conj.h>
#include <ATen/ops/_conj_physical.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/conj_physical.h>
#include <ATen/ops/resolve_conj.h>
#include <ATen/ops/resolve_neg.h>
#include <ATen/ops/_neg_view.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/acos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/arccos.h>
#include <ATen/ops/avg_pool1d.h>
#include <ATen/ops/adaptive_avg_pool1d.h>
#include <ATen/ops/adaptive_max_pool1d.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/add.h>
#include <ATen/ops/add.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addmv.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/addr.h>
#include <ATen/ops/affine_grid_generator.h>
#include <ATen/ops/affine_grid_generator_backward.h>
#include <ATen/ops/_is_all_true.h>
#include <ATen/ops/_is_any_true.h>
#include <ATen/ops/_test_check_tensor.h>
#include <ATen/ops/_test_functorch_fallback.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/allclose.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/_dim_arange.h>
#include <ATen/ops/argmax.h>
#include <ATen/ops/argmax.h>
#include <ATen/ops/argmin.h>
#include <ATen/ops/argmin.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/acosh.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/arccosh.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/asinh.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/arcsinh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/atanh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/arctanh.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/asin.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/arcsin.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/atan.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/arctan.h>
#include <ATen/ops/atleast_1d.h>
#include <ATen/ops/atleast_1d.h>
#include <ATen/ops/atleast_2d.h>
#include <ATen/ops/atleast_2d.h>
#include <ATen/ops/atleast_3d.h>
#include <ATen/ops/atleast_3d.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/baddbmm.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/batch_norm.h>
#include <ATen/ops/quantized_batch_norm.h>
#include <ATen/ops/_batch_norm_impl_index.h>
#include <ATen/ops/_batch_norm_impl_index_backward.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bilinear.h>
#include <ATen/ops/binary_cross_entropy.h>
#include <ATen/ops/binary_cross_entropy.h>
#include <ATen/ops/binary_cross_entropy_backward.h>
#include <ATen/ops/binary_cross_entropy_backward.h>
#include <ATen/ops/binary_cross_entropy_with_logits.h>
#include <ATen/ops/bincount.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/bitwise_not.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/copysign.h>
#include <ATen/ops/_lazy_clone.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_not.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logical_xor.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_and.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/logical_or.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/bmm.h>
#include <ATen/ops/bmm.h>
#include <ATen/ops/broadcast_tensors.h>
#include <ATen/ops/broadcast_to.h>
#include <ATen/ops/_sparse_broadcast_to.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concat.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/concatenate.h>
#include <ATen/ops/block_diag.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/ceil.h>
#include <ATen/ops/chain_matmul.h>
#include <ATen/ops/chain_matmul.h>
#include <ATen/ops/unsafe_chunk.h>
#include <ATen/ops/chunk.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/tensor_split.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_max.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clamp_min.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/clip.h>
#include <ATen/ops/cudnn_is_acceptable.h>
#include <ATen/ops/complex.h>
#include <ATen/ops/complex.h>
#include <ATen/ops/polar.h>
#include <ATen/ops/polar.h>
#include <ATen/ops/constant_pad_nd.h>
#include <ATen/ops/contiguous.h>
#include <ATen/ops/convolution.h>
#include <ATen/ops/convolution_backward.h>
#include <ATen/ops/convolution_overrideable.h>
#include <ATen/ops/convolution_backward_overrideable.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/_convolution_mode.h>
#include <ATen/ops/_convolution_double_backward.h>
#include <ATen/ops/conv1d.h>
#include <ATen/ops/conv2d.h>
#include <ATen/ops/conv3d.h>
#include <ATen/ops/conv1d.h>
#include <ATen/ops/conv2d.h>
#include <ATen/ops/conv3d.h>
#include <ATen/ops/conv_tbc.h>
#include <ATen/ops/conv_tbc_backward.h>
#include <ATen/ops/conv_transpose1d.h>
#include <ATen/ops/conv_transpose2d.h>
#include <ATen/ops/conv_transpose3d.h>
#include <ATen/ops/copy.h>
#include <ATen/ops/copy.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cos.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosh.h>
#include <ATen/ops/cosine_embedding_loss.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/cov.h>
#include <ATen/ops/corrcoef.h>
#include <ATen/ops/cudnn_affine_grid_generator.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward.h>
#include <ATen/ops/cudnn_batch_norm.h>
#include <ATen/ops/cudnn_batch_norm_backward.h>
#include <ATen/ops/cudnn_convolution.h>
#include <ATen/ops/cudnn_convolution.h>
#include <ATen/ops/cudnn_convolution_transpose.h>
#include <ATen/ops/_mps_convolution_transpose.h>
#include <ATen/ops/mps_convolution_transpose_backward.h>
#include <ATen/ops/cudnn_convolution_relu.h>
#include <ATen/ops/cudnn_convolution_add_relu.h>
#include <ATen/ops/cudnn_grid_sampler.h>
#include <ATen/ops/cudnn_grid_sampler_backward.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/cummax.h>
#include <ATen/ops/_cummax_helper.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/cummin.h>
#include <ATen/ops/_cummin_helper.h>
#include <ATen/ops/cummaxmin_backward.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod.h>
#include <ATen/ops/cumprod_backward.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/cumulative_trapezoid.h>
#include <ATen/ops/cumulative_trapezoid.h>
#include <ATen/ops/ctc_loss.h>
#include <ATen/ops/ctc_loss.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/diag_embed.h>
#include <ATen/ops/diagflat.h>
#include <ATen/ops/diagonal.h>
#include <ATen/ops/linalg_diagonal.h>
#include <ATen/ops/diagonal.h>
#include <ATen/ops/diagonal_backward.h>
#include <ATen/ops/fill_diagonal.h>
#include <ATen/ops/diff.h>
#include <ATen/ops/diff.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/gradient.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/true_divide.h>
#include <ATen/ops/dot.h>
#include <ATen/ops/dot.h>
#include <ATen/ops/vdot.h>
#include <ATen/ops/vdot.h>
#include <ATen/ops/einsum.h>
#include <ATen/ops/embedding.h>
#include <ATen/ops/embedding_backward.h>
#include <ATen/ops/embedding_dense_backward.h>
#include <ATen/ops/embedding_renorm.h>
#include <ATen/ops/embedding_sparse_backward.h>
#include <ATen/ops/_embedding_bag_forward_only.h>
#include <ATen/ops/_rowwise_prune.h>
#include <ATen/ops/row_stack.h>
#include <ATen/ops/row_stack.h>
#include <ATen/ops/embedding_bag.h>
#include <ATen/ops/embedding_bag.h>
#include <ATen/ops/_embedding_bag.h>
#include <ATen/ops/_embedding_bag_backward.h>
#include <ATen/ops/_embedding_bag_sparse_backward.h>
#include <ATen/ops/_embedding_bag_dense_backward.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_permuted.h>
#include <ATen/ops/new_empty.h>
#include <ATen/ops/new_empty_strided.h>
#include <ATen/ops/new_full.h>
#include <ATen/ops/new_zeros.h>
#include <ATen/ops/new_ones.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/empty_quantized.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erf.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/erfc.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/exp2.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expm1.h>
#include <ATen/ops/expand.h>
#include <ATen/ops/expand_as.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/flatten.h>
#include <ATen/ops/unflatten.h>
#include <ATen/ops/unflatten.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/frac.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full_like.h>
#include <ATen/ops/from_file.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/gcd.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/lcm.h>
#include <ATen/ops/grid_sampler.h>
#include <ATen/ops/grid_sampler_2d.h>
#include <ATen/ops/grid_sampler_2d_backward.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h>
#include <ATen/ops/grid_sampler_3d.h>
#include <ATen/ops/grid_sampler_3d_backward.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/hinge_embedding_loss.h>
#include <ATen/ops/group_norm.h>
#include <ATen/ops/native_group_norm.h>
#include <ATen/ops/native_group_norm_backward.h>
#include <ATen/ops/_fft_r2c.h>
#include <ATen/ops/_fft_r2c.h>
#include <ATen/ops/_fft_c2r.h>
#include <ATen/ops/_fft_c2r.h>
#include <ATen/ops/_fft_c2c.h>
#include <ATen/ops/_fft_c2c.h>
#include <ATen/ops/_validate_compressed_sparse_indices.h>
#include <ATen/ops/_cufft_get_plan_cache_size.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size.h>
#include <ATen/ops/_cufft_clear_plan_cache.h>
#include <ATen/ops/index.h>
#include <ATen/ops/index.h>
#include <ATen/ops/_unsafe_index.h>
#include <ATen/ops/_unsafe_masked_index.h>
#include <ATen/ops/_unsafe_masked_index_put_accumulate.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_copy.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/_unsafe_index_put.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/instance_norm.h>
#include <ATen/ops/isclose.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isin.h>
#include <ATen/ops/isnan.h>
#include <ATen/ops/is_distributed.h>
#include <ATen/ops/is_floating_point.h>
#include <ATen/ops/is_complex.h>
#include <ATen/ops/is_conj.h>
#include <ATen/ops/_is_zerotensor.h>
#include <ATen/ops/is_neg.h>
#include <ATen/ops/isreal.h>
#include <ATen/ops/is_nonzero.h>
#include <ATen/ops/is_same_size.h>
#include <ATen/ops/is_signed.h>
#include <ATen/ops/is_inference.h>
#include <ATen/ops/kl_div.h>
#include <ATen/ops/kron.h>
#include <ATen/ops/kron.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/kthvalue.h>
#include <ATen/ops/layer_norm.h>
#include <ATen/ops/native_layer_norm.h>
#include <ATen/ops/native_layer_norm_backward.h>
#include <ATen/ops/rms_norm.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/nan_to_num.h>
#include <ATen/ops/linear.h>
#include <ATen/ops/linear_backward.h>
#include <ATen/ops/linear.h>
#include <ATen/ops/mkldnn_linear.h>
#include <ATen/ops/mkldnn_linear_backward_input.h>
#include <ATen/ops/mkldnn_linear_backward_weights.h>
#include <ATen/ops/mkldnn_linear_backward.h>
#include <ATen/ops/_cslt_compress.h>
#include <ATen/ops/_cslt_sparse_mm.h>
#include <ATen/ops/_cslt_sparse_mm_search.h>
#include <ATen/ops/_sparse_semi_structured_tile.h>
#include <ATen/ops/_sparse_semi_structured_apply.h>
#include <ATen/ops/_sparse_semi_structured_apply_dense.h>
#include <ATen/ops/_sparse_semi_structured_linear.h>
#include <ATen/ops/_sparse_semi_structured_mm.h>
#include <ATen/ops/_sparse_semi_structured_addmm.h>
#include <ATen/ops/_mixed_dtypes_linear.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h>
#include <ATen/ops/fbgemm_linear_int8_weight.h>
#include <ATen/ops/fbgemm_linear_quantize_weight.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16.h>
#include <ATen/ops/_wrapped_linear_prepack.h>
#include <ATen/ops/_wrapped_quantized_linear_prepacked.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h>
#include <ATen/ops/fbgemm_linear_fp16_weight.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/ldexp.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log10.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log1p.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/log2.h>
#include <ATen/ops/logaddexp.h>
#include <ATen/ops/logaddexp.h>
#include <ATen/ops/logaddexp2.h>
#include <ATen/ops/logaddexp2.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/xlogy.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/log_softmax.h>
#include <ATen/ops/_log_softmax.h>
#include <ATen/ops/_log_softmax.h>
#include <ATen/ops/_log_softmax_backward_data.h>
#include <ATen/ops/_log_softmax_backward_data.h>
#include <ATen/ops/_logcumsumexp.h>
#include <ATen/ops/_logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logcumsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/logsumexp.h>
#include <ATen/ops/margin_ranking_loss.h>
#include <ATen/ops/matmul.h>
#include <ATen/ops/matmul_backward.h>
#include <ATen/ops/matmul.h>
#include <ATen/ops/matrix_power.h>
#include <ATen/ops/matrix_power.h>
#include <ATen/ops/matrix_exp.h>
#include <ATen/ops/matrix_exp_backward.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/_compute_linear_combination.h>
#include <ATen/ops/_compute_linear_combination.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/value_selecting_reduction_backward.h>
#include <ATen/ops/amax.h>
#include <ATen/ops/amax.h>
#include <ATen/ops/max_pool1d_with_indices.h>
#include <ATen/ops/max_pool1d.h>
#include <ATen/ops/max_pool2d.h>
#include <ATen/ops/max_pool2d_backward.h>
#include <ATen/ops/mkldnn_max_pool2d.h>
#include <ATen/ops/mkldnn_max_pool2d_backward.h>
#include <ATen/ops/mkldnn_max_pool3d.h>
#include <ATen/ops/mkldnn_max_pool3d_backward.h>
#include <ATen/ops/quantized_max_pool1d.h>
#include <ATen/ops/quantized_max_pool2d.h>
#include <ATen/ops/quantized_max_pool3d.h>
#include <ATen/ops/max_pool3d.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/mean.h>
#include <ATen/ops/nanmean.h>
#include <ATen/ops/nanmean.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/median.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/amin.h>
#include <ATen/ops/amin.h>
#include <ATen/ops/_mps_convolution.h>
#include <ATen/ops/mps_convolution_backward.h>
#include <ATen/ops/mkldnn_convolution.h>
#include <ATen/ops/mkldnn_rnn_layer.h>
#include <ATen/ops/mkldnn_rnn_layer_backward.h>
#include <ATen/ops/miopen_batch_norm.h>
#include <ATen/ops/miopen_batch_norm_backward.h>
#include <ATen/ops/miopen_convolution.h>
#include <ATen/ops/miopen_convolution_transpose.h>
#include <ATen/ops/miopen_depthwise_convolution.h>
#include <ATen/ops/miopen_convolution_relu.h>
#include <ATen/ops/miopen_convolution_add_relu.h>
#include <ATen/ops/miopen_rnn.h>
#include <ATen/ops/miopen_rnn_backward.h>
#include <ATen/ops/mm.h>
#include <ATen/ops/mm.h>
#include <ATen/ops/_int_mm.h>
#include <ATen/ops/_int_mm.h>
#include <ATen/ops/_convert_weight_to_int4pack.h>
#include <ATen/ops/_weight_int4pack_mm.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight.h>
#include <ATen/ops/_dyn_quant_matmul_4bit.h>
#include <ATen/ops/_weight_int8pack_mm.h>
#include <ATen/ops/_sparse_mm.h>
#include <ATen/ops/_sparse_mm.h>
#include <ATen/ops/_sparse_sparse_matmul.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mode.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/multiply.h>
#include <ATen/ops/mv.h>
#include <ATen/ops/mv.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/mvlgamma.h>
#include <ATen/ops/narrow_copy.h>
#include <ATen/ops/narrow_copy.h>
#include <ATen/ops/narrow.h>
#include <ATen/ops/narrow.h>
#include <ATen/ops/native_batch_norm.h>
#include <ATen/ops/native_batch_norm.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit_no_training.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/batch_norm_stats.h>
#include <ATen/ops/batch_norm_elemt.h>
#include <ATen/ops/batch_norm_elemt.h>
#include <ATen/ops/batch_norm_gather_stats.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts.h>
#include <ATen/ops/native_batch_norm_backward.h>
#include <ATen/ops/batch_norm_backward_reduce.h>
#include <ATen/ops/batch_norm_backward_elemt.h>
#include <ATen/ops/batch_norm_update_stats.h>
#include <ATen/ops/is_vulkan_available.h>
#include <ATen/ops/_nnpack_available.h>
#include <ATen/ops/_nnpack_spatial_convolution.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones_like.h>
#include <ATen/ops/pairwise_distance.h>
#include <ATen/ops/cdist.h>
#include <ATen/ops/_euclidean_dist.h>
#include <ATen/ops/_cdist_forward.h>
#include <ATen/ops/_cdist_backward.h>
#include <ATen/ops/pdist.h>
#include <ATen/ops/_pdist_forward.h>
#include <ATen/ops/_pdist_backward.h>
#include <ATen/ops/cosine_similarity.h>
#include <ATen/ops/permute.h>
#include <ATen/ops/movedim.h>
#include <ATen/ops/movedim.h>
#include <ATen/ops/moveaxis.h>
#include <ATen/ops/moveaxis.h>
#include <ATen/ops/numpy_T.h>
#include <ATen/ops/matrix_H.h>
#include <ATen/ops/mT.h>
#include <ATen/ops/mH.h>
#include <ATen/ops/adjoint.h>
#include <ATen/ops/pixel_shuffle.h>
#include <ATen/ops/pixel_unshuffle.h>
#include <ATen/ops/channel_shuffle.h>
#include <ATen/ops/native_channel_shuffle.h>
#include <ATen/ops/is_pinned.h>
#include <ATen/ops/pin_memory.h>
#include <ATen/ops/_pin_memory.h>
#include <ATen/ops/pinverse.h>
#include <ATen/ops/poisson_nll_loss.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/rad2deg.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/deg2rad.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand_like.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn_like.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/range.h>
#include <ATen/ops/ravel.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/reciprocal.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/neg.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/negative.h>
#include <ATen/ops/repeat.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/reshape.h>
#include <ATen/ops/_reshape_copy.h>
#include <ATen/ops/_reshape_alias.h>
#include <ATen/ops/_mkldnn_reshape.h>
#include <ATen/ops/reshape_as.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/round.h>
#include <ATen/ops/rrelu.h>
#include <ATen/ops/rrelu.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/relu6.h>
#include <ATen/ops/relu6.h>
#include <ATen/ops/prelu.h>
#include <ATen/ops/_prelu_kernel.h>
#include <ATen/ops/_prelu_kernel_backward.h>
#include <ATen/ops/gelu.h>
#include <ATen/ops/gelu.h>
#include <ATen/ops/gelu.h>
#include <ATen/ops/gelu_backward.h>
#include <ATen/ops/gelu_backward.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward.h>
#include <ATen/ops/hardshrink.h>
#include <ATen/ops/hardshrink.h>
#include <ATen/ops/hardshrink_backward.h>
#include <ATen/ops/hardshrink_backward.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/rsqrt.h>
#include <ATen/ops/select.h>
#include <ATen/ops/select.h>
#include <ATen/ops/select_backward.h>
#include <ATen/ops/_nested_select_backward.h>
#include <ATen/ops/selu.h>
#include <ATen/ops/selu.h>
#include <ATen/ops/celu.h>
#include <ATen/ops/celu.h>
#include <ATen/ops/silu.h>
#include <ATen/ops/silu.h>
#include <ATen/ops/silu.h>
#include <ATen/ops/silu_backward.h>
#include <ATen/ops/silu_backward.h>
#include <ATen/ops/mish.h>
#include <ATen/ops/mish.h>
#include <ATen/ops/mish.h>
#include <ATen/ops/mish_backward.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/sigmoid.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/logit.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sin.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinc.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/sinh.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/size.h>
#include <ATen/ops/size.h>
#include <ATen/ops/sym_size.h>
#include <ATen/ops/sym_numel.h>
#include <ATen/ops/sym_storage_offset.h>
#include <ATen/ops/slice.h>
#include <ATen/ops/slice_backward.h>
#include <ATen/ops/slice_inverse.h>
#include <ATen/ops/slice_scatter.h>
#include <ATen/ops/select_scatter.h>
#include <ATen/ops/diagonal_scatter.h>
#include <ATen/ops/as_strided_scatter.h>
#include <ATen/ops/smm.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/softmax.h>
#include <ATen/ops/_softmax.h>
#include <ATen/ops/_softmax.h>
#include <ATen/ops/_softmax_backward_data.h>
#include <ATen/ops/_softmax_backward_data.h>
#include <ATen/ops/unsafe_split.h>
#include <ATen/ops/split.h>
#include <ATen/ops/split.h>
#include <ATen/ops/unsafe_split_with_sizes.h>
#include <ATen/ops/split_with_sizes.h>
#include <ATen/ops/hsplit.h>
#include <ATen/ops/hsplit.h>
#include <ATen/ops/vsplit.h>
#include <ATen/ops/vsplit.h>
#include <ATen/ops/dsplit.h>
#include <ATen/ops/dsplit.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/sspaddmm.h>
#include <ATen/ops/sspaddmm.h>
#include <ATen/ops/_chunk_cat.h>
#include <ATen/ops/_chunk_cat.h>
#include <ATen/ops/stack.h>
#include <ATen/ops/stack.h>
#include <ATen/ops/_stack.h>
#include <ATen/ops/_stack.h>
#include <ATen/ops/hstack.h>
#include <ATen/ops/hstack.h>
#include <ATen/ops/vstack.h>
#include <ATen/ops/vstack.h>
#include <ATen/ops/dstack.h>
#include <ATen/ops/dstack.h>
#include <ATen/ops/stft.h>
#include <ATen/ops/stft.h>
#include <ATen/ops/istft.h>
#include <ATen/ops/stride.h>
#include <ATen/ops/stride.h>
#include <ATen/ops/sym_stride.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/_nested_sum_backward.h>
#include <ATen/ops/nansum.h>
#include <ATen/ops/nansum.h>
#include <ATen/ops/sum_to_size.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/sqrt.h>
#include <ATen/ops/square.h>
#include <ATen/ops/square.h>
#include <ATen/ops/square.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/std.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/t.h>
#include <ATen/ops/t.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tan.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tanh.h>
#include <ATen/ops/tensordot.h>
#include <ATen/ops/tensordot.h>
#include <ATen/ops/threshold.h>
#include <ATen/ops/threshold.h>
#include <ATen/ops/threshold.h>
#include <ATen/ops/threshold_backward.h>
#include <ATen/ops/threshold_backward.h>
#include <ATen/ops/tile.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/transpose.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/one_hot.h>
#include <ATen/ops/flip.h>
#include <ATen/ops/fliplr.h>
#include <ATen/ops/flipud.h>
#include <ATen/ops/roll.h>
#include <ATen/ops/rot90.h>
#include <ATen/ops/trapezoid.h>
#include <ATen/ops/trapezoid.h>
#include <ATen/ops/trapz.h>
#include <ATen/ops/trapz.h>
#include <ATen/ops/_transform_bias_rescale_qkv.h>
#include <ATen/ops/_nested_tensor_from_mask.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned.h>
#include <ATen/ops/_nested_from_padded.h>
#include <ATen/ops/_nested_tensor_size.h>
#include <ATen/ops/_nested_tensor_strides.h>
#include <ATen/ops/_nested_tensor_storage_offsets.h>
#include <ATen/ops/_nested_from_padded_and_nested_example.h>
#include <ATen/ops/_nested_view_from_buffer.h>
#include <ATen/ops/_nested_view_from_buffer_copy.h>
#include <ATen/ops/_nested_view_from_jagged.h>
#include <ATen/ops/_nested_view_from_jagged_copy.h>
#include <ATen/ops/_nested_get_values.h>
#include <ATen/ops/_nested_get_values_copy.h>
#include <ATen/ops/_nested_get_offsets.h>
#include <ATen/ops/_nested_get_lengths.h>
#include <ATen/ops/_nested_get_ragged_idx.h>
#include <ATen/ops/_nested_get_min_seqlen.h>
#include <ATen/ops/_nested_get_max_seqlen.h>
#include <ATen/ops/_nested_get_jagged_dummy.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets.h>
#include <ATen/ops/_trilinear.h>
#include <ATen/ops/triplet_margin_loss.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/trunc.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/fix.h>
#include <ATen/ops/type_as.h>
#include <ATen/ops/_has_compatible_shallow_copy_type.h>
#include <ATen/ops/_unique.h>
#include <ATen/ops/unique_dim.h>
#include <ATen/ops/unique_consecutive.h>
#include <ATen/ops/unique_dim_consecutive.h>
#include <ATen/ops/_unique2.h>
#include <ATen/ops/_unsafe_view.h>
#include <ATen/ops/unsqueeze.h>
#include <ATen/ops/unsqueeze.h>
#include <ATen/ops/vander.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/view_as.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/where.h>
#include <ATen/ops/norm_except_dim.h>
#include <ATen/ops/_weight_norm.h>
#include <ATen/ops/_weight_norm_interface.h>
#include <ATen/ops/_weight_norm_interface_backward.h>
#include <ATen/ops/_weight_norm_differentiable_backward.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/_standard_gamma_grad.h>
#include <ATen/ops/_standard_gamma.h>
#include <ATen/ops/_dirichlet_grad.h>
#include <ATen/ops/_sample_dirichlet.h>
#include <ATen/ops/poisson.h>
#include <ATen/ops/binomial.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/_batch_norm_with_update.h>
#include <ATen/ops/_batch_norm_with_update.h>
#include <ATen/ops/_batch_norm_no_update.h>
#include <ATen/ops/batch_norm_backward.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum_backward.h>
#include <ATen/ops/_sparse_csr_sum.h>
#include <ATen/ops/_sparse_csr_prod.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax_backward_data.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax_backward_data.h>
#include <ATen/ops/_spdiags.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/frexp.h>
#include <ATen/ops/frexp.h>
#include <ATen/ops/frobenius_norm.h>
#include <ATen/ops/frobenius_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/nuclear_norm.h>
#include <ATen/ops/clone.h>
#include <ATen/ops/positive.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/subtract.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/heaviside.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/_sparse_addmm.h>
#include <ATen/ops/sparse_sampled_addmm.h>
#include <ATen/ops/sparse_sampled_addmm.h>
#include <ATen/ops/_sparse_mm_reduce_impl.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/addmm.h>
#include <ATen/ops/_addmm_activation.h>
#include <ATen/ops/_addmm_activation.h>
#include <ATen/ops/_scaled_mm.h>
#include <ATen/ops/_scaled_mm.h>
#include <ATen/ops/_scaled_grouped_mm.h>
#include <ATen/ops/_sparse_compressed_tensor_with_dims.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/sparse_mask.h>
#include <ATen/ops/_sparse_mask_projection.h>
#include <ATen/ops/_to_cpu.h>
#include <ATen/ops/to_dense.h>
#include <ATen/ops/_to_dense.h>
#include <ATen/ops/to_dense_backward.h>
#include <ATen/ops/sparse_dim.h>
#include <ATen/ops/_dimI.h>
#include <ATen/ops/dense_dim.h>
#include <ATen/ops/_dimV.h>
#include <ATen/ops/_nnz.h>
#include <ATen/ops/coalesce.h>
#include <ATen/ops/_coalesce.h>
#include <ATen/ops/is_coalesced.h>
#include <ATen/ops/_indices.h>
#include <ATen/ops/_values.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/indices.h>
#include <ATen/ops/values.h>
#include <ATen/ops/crow_indices.h>
#include <ATen/ops/col_indices.h>
#include <ATen/ops/ccol_indices.h>
#include <ATen/ops/row_indices.h>
#include <ATen/ops/hspmm.h>
#include <ATen/ops/hspmm.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/unbind.h>
#include <ATen/ops/unbind.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/_to_sparse.h>
#include <ATen/ops/to_sparse.h>
#include <ATen/ops/_to_sparse.h>
#include <ATen/ops/to_sparse_csr.h>
#include <ATen/ops/_to_sparse_csr.h>
#include <ATen/ops/to_sparse_csc.h>
#include <ATen/ops/_to_sparse_csc.h>
#include <ATen/ops/to_sparse_bsr.h>
#include <ATen/ops/_to_sparse_bsr.h>
#include <ATen/ops/to_sparse_bsc.h>
#include <ATen/ops/_to_sparse_bsc.h>
#include <ATen/ops/_to_sparse_semi_structured.h>
#include <ATen/ops/to_mkldnn.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
#include <ATen/ops/to_mkldnn_backward.h>
#include <ATen/ops/quantize_per_tensor_dynamic.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_channel.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/q_scale.h>
#include <ATen/ops/q_zero_point.h>
#include <ATen/ops/q_per_channel_scales.h>
#include <ATen/ops/q_per_channel_zero_points.h>
#include <ATen/ops/q_per_channel_axis.h>
#include <ATen/ops/int_repr.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
#include <ATen/ops/_make_per_channel_quantized_tensor.h>
#include <ATen/ops/qscheme.h>
#include <ATen/ops/fake_quantize_per_tensor_affine.h>
#include <ATen/ops/fake_quantize_per_tensor_affine.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h>
#include <ATen/ops/fake_quantize_per_channel_affine.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_choose_qparams_per_tensor.h>
#include <ATen/ops/_saturate_weight_to_fp16.h>
#include <ATen/ops/choose_qparams_optimized.h>
#include <ATen/ops/_autocast_to_reduced_precision.h>
#include <ATen/ops/_autocast_to_full_precision.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/to.h>
#include <ATen/ops/to.h>
#include <ATen/ops/to.h>
#include <ATen/ops/to.h>
#include <ATen/ops/meshgrid.h>
#include <ATen/ops/meshgrid.h>
#include <ATen/ops/cartesian_prod.h>
#include <ATen/ops/combinations.h>
#include <ATen/ops/item.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/can_cast.h>
#include <ATen/ops/promote_types.h>
#include <ATen/ops/_local_scalar_dense.h>
#include <ATen/ops/_lstm_mps.h>
#include <ATen/ops/lstm_mps_backward.h>
#include <ATen/ops/_thnn_fused_lstm_cell.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward.h>
#include <ATen/ops/_thnn_fused_gru_cell.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward.h>
#include <ATen/ops/lstm.h>
#include <ATen/ops/lstm.h>
#include <ATen/ops/gru.h>
#include <ATen/ops/gru.h>
#include <ATen/ops/rnn_tanh.h>
#include <ATen/ops/rnn_tanh.h>
#include <ATen/ops/rnn_relu.h>
#include <ATen/ops/rnn_relu.h>
#include <ATen/ops/lstm_cell.h>
#include <ATen/ops/gru_cell.h>
#include <ATen/ops/rnn_tanh_cell.h>
#include <ATen/ops/rnn_relu_cell.h>
#include <ATen/ops/quantized_lstm_cell.h>
#include <ATen/ops/quantized_gru_cell.h>
#include <ATen/ops/quantized_rnn_relu_cell.h>
#include <ATen/ops/quantized_rnn_tanh_cell.h>
#include <ATen/ops/_pack_padded_sequence.h>
#include <ATen/ops/_pack_padded_sequence_backward.h>
#include <ATen/ops/_pad_packed_sequence.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/lift.h>
#include <ATen/ops/lift_fresh.h>
#include <ATen/ops/lift_fresh_copy.h>
#include <ATen/ops/is_set_to.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/masked_scatter_backward.h>
#include <ATen/ops/_masked_softmax.h>
#include <ATen/ops/_masked_softmax_backward.h>
#include <ATen/ops/view.h>
#include <ATen/ops/view.h>
#include <ATen/ops/put.h>
#include <ATen/ops/put.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_add.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_reduce.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_add.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/scatter_reduce.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/and.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/or.h>
#include <ATen/ops/or.h>
#include <ATen/ops/or.h>
#include <ATen/ops/or.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/xor.h>
#include <ATen/ops/lshift.h>
#include <ATen/ops/lshift.h>
#include <ATen/ops/lshift.h>
#include <ATen/ops/lshift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/rshift.h>
#include <ATen/ops/rshift.h>
#include <ATen/ops/rshift.h>
#include <ATen/ops/rshift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/addbmm.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/diag.h>
#include <ATen/ops/diag.h>
#include <ATen/ops/cross.h>
#include <ATen/ops/cross.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/triu.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/tril.h>
#include <ATen/ops/tril_indices.h>
#include <ATen/ops/triu_indices.h>
#include <ATen/ops/trace.h>
#include <ATen/ops/trace_backward.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/ne.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/not_equal.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/eq.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/ge.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/greater_equal.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/le.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/less_equal.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/gt.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/greater.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/lt.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/less.h>
#include <ATen/ops/take.h>
#include <ATen/ops/take.h>
#include <ATen/ops/take_along_dim.h>
#include <ATen/ops/take_along_dim.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select.h>
#include <ATen/ops/index_select_backward.h>
#include <ATen/ops/masked_select.h>
#include <ATen/ops/masked_select.h>
#include <ATen/ops/masked_select_backward.h>
#include <ATen/ops/nonzero.h>
#include <ATen/ops/nonzero.h>
#include <ATen/ops/nonzero_static.h>
#include <ATen/ops/nonzero_static.h>
#include <ATen/ops/nonzero_numpy.h>
#include <ATen/ops/argwhere.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gather_backward.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/gather.h>
#include <ATen/ops/_gather_sparse_backward.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addcmul.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/addcdiv.h>
#include <ATen/ops/cross_entropy_loss.h>
#include <ATen/ops/triangular_solve.h>
#include <ATen/ops/triangular_solve.h>
#include <ATen/ops/_linalg_check_errors.h>
#include <ATen/ops/linalg_solve_triangular.h>
#include <ATen/ops/linalg_solve_triangular.h>
#include <ATen/ops/linalg_vander.h>
#include <ATen/ops/svd.h>
#include <ATen/ops/svd.h>
#include <ATen/ops/swapaxes.h>
#include <ATen/ops/swapaxes.h>
#include <ATen/ops/swapdims.h>
#include <ATen/ops/swapdims.h>
#include <ATen/ops/cholesky.h>
#include <ATen/ops/cholesky.h>
#include <ATen/ops/cholesky_solve.h>
#include <ATen/ops/cholesky_solve.h>
#include <ATen/ops/_cholesky_solve_helper.h>
#include <ATen/ops/cholesky_inverse.h>
#include <ATen/ops/cholesky_inverse.h>
#include <ATen/ops/qr.h>
#include <ATen/ops/qr.h>
#include <ATen/ops/geqrf.h>
#include <ATen/ops/geqrf.h>
#include <ATen/ops/orgqr.h>
#include <ATen/ops/orgqr.h>
#include <ATen/ops/ormqr.h>
#include <ATen/ops/ormqr.h>
#include <ATen/ops/_lu_with_info.h>
#include <ATen/ops/lu_solve.h>
#include <ATen/ops/lu_solve.h>
#include <ATen/ops/lu_unpack.h>
#include <ATen/ops/lu_unpack.h>
#include <ATen/ops/multinomial.h>
#include <ATen/ops/multinomial.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/lgamma.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/digamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/polygamma.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/erfinv.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/i0.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/sign.h>
#include <ATen/ops/signbit.h>
#include <ATen/ops/signbit.h>
#include <ATen/ops/dist.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/atan2.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/arctan2.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/lerp.h>
#include <ATen/ops/histc.h>
#include <ATen/ops/histc.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/histogram.h>
#include <ATen/ops/_histogramdd_bin_edges.h>
#include <ATen/ops/_histogramdd_from_bin_cts.h>
#include <ATen/ops/_histogramdd_from_bin_tensors.h>
#include <ATen/ops/histogramdd.h>
#include <ATen/ops/histogramdd.h>
#include <ATen/ops/histogramdd.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/fmod.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/hypot.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igamma.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/igammac.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/nextafter.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/fmin.h>
#include <ATen/ops/fmin.h>
#include <ATen/ops/max.h>
#include <ATen/ops/fmax.h>
#include <ATen/ops/fmax.h>
#include <ATen/ops/maximum.h>
#include <ATen/ops/maximum.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max.h>
#include <ATen/ops/minimum.h>
#include <ATen/ops/minimum.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/quantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/nanquantile.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/sort.h>
#include <ATen/ops/msort.h>
#include <ATen/ops/msort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/argsort.h>
#include <ATen/ops/topk.h>
#include <ATen/ops/topk.h>
#include <ATen/ops/all.h>
#include <ATen/ops/all.h>
#include <ATen/ops/any.h>
#include <ATen/ops/any.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/renorm.h>
#include <ATen/ops/unfold.h>
#include <ATen/ops/unfold_backward.h>
#include <ATen/ops/equal.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/pow.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/float_power.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/alias.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/_foreach_max.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_norm.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_rsqrt.h>
#include <ATen/ops/_foreach_rsqrt.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sign.h>
#include <ATen/ops/_foreach_sign.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_foreach_copy.h>
#include <ATen/ops/_foreach_copy.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/searchsorted.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo.h>
#include <ATen/ops/mse_loss.h>
#include <ATen/ops/mse_loss.h>
#include <ATen/ops/mse_loss_backward.h>
#include <ATen/ops/mse_loss_backward.h>
#include <ATen/ops/l1_loss.h>
#include <ATen/ops/multi_margin_loss.h>
#include <ATen/ops/multi_margin_loss.h>
#include <ATen/ops/multi_margin_loss_backward.h>
#include <ATen/ops/multi_margin_loss_backward.h>
#include <ATen/ops/multilabel_margin_loss.h>
#include <ATen/ops/multilabel_margin_loss.h>
#include <ATen/ops/multilabel_margin_loss_forward.h>
#include <ATen/ops/multilabel_margin_loss_forward.h>
#include <ATen/ops/multilabel_margin_loss_backward.h>
#include <ATen/ops/multilabel_margin_loss_backward.h>
#include <ATen/ops/nll_loss.h>
#include <ATen/ops/nll_loss_nd.h>
#include <ATen/ops/nll_loss.h>
#include <ATen/ops/nll_loss_forward.h>
#include <ATen/ops/nll_loss_forward.h>
#include <ATen/ops/nll_loss_backward.h>
#include <ATen/ops/nll_loss_backward.h>
#include <ATen/ops/nll_loss2d.h>
#include <ATen/ops/nll_loss2d.h>
#include <ATen/ops/nll_loss2d_forward.h>
#include <ATen/ops/nll_loss2d_forward.h>
#include <ATen/ops/nll_loss2d_backward.h>
#include <ATen/ops/nll_loss2d_backward.h>
#include <ATen/ops/smooth_l1_loss.h>
#include <ATen/ops/smooth_l1_loss.h>
#include <ATen/ops/smooth_l1_loss_backward.h>
#include <ATen/ops/smooth_l1_loss_backward.h>
#include <ATen/ops/huber_loss.h>
#include <ATen/ops/huber_loss.h>
#include <ATen/ops/huber_loss_backward.h>
#include <ATen/ops/huber_loss_backward.h>
#include <ATen/ops/soft_margin_loss.h>
#include <ATen/ops/soft_margin_loss.h>
#include <ATen/ops/soft_margin_loss_backward.h>
#include <ATen/ops/soft_margin_loss_backward.h>
#include <ATen/ops/elu.h>
#include <ATen/ops/elu.h>
#include <ATen/ops/elu_backward.h>
#include <ATen/ops/elu_backward.h>
#include <ATen/ops/elu.h>
#include <ATen/ops/glu.h>
#include <ATen/ops/glu.h>
#include <ATen/ops/glu_backward.h>
#include <ATen/ops/glu_backward.h>
#include <ATen/ops/glu_jvp.h>
#include <ATen/ops/glu_backward_jvp.h>
#include <ATen/ops/hardsigmoid.h>
#include <ATen/ops/hardsigmoid.h>
#include <ATen/ops/hardsigmoid.h>
#include <ATen/ops/hardsigmoid_backward.h>
#include <ATen/ops/hardsigmoid_backward.h>
#include <ATen/ops/hardtanh.h>
#include <ATen/ops/hardtanh.h>
#include <ATen/ops/hardtanh_backward.h>
#include <ATen/ops/hardtanh_backward.h>
#include <ATen/ops/hardtanh.h>
#include <ATen/ops/hardswish.h>
#include <ATen/ops/hardswish.h>
#include <ATen/ops/hardswish.h>
#include <ATen/ops/hardswish_backward.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/leaky_relu_backward.h>
#include <ATen/ops/leaky_relu_backward.h>
#include <ATen/ops/leaky_relu.h>
#include <ATen/ops/log_sigmoid.h>
#include <ATen/ops/log_sigmoid.h>
#include <ATen/ops/log_sigmoid_forward.h>
#include <ATen/ops/log_sigmoid_forward.h>
#include <ATen/ops/log_sigmoid_backward.h>
#include <ATen/ops/log_sigmoid_backward.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/rrelu_with_noise_backward.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/softplus.h>
#include <ATen/ops/softplus.h>
#include <ATen/ops/softplus_backward.h>
#include <ATen/ops/softplus_backward.h>
#include <ATen/ops/softshrink.h>
#include <ATen/ops/softshrink.h>
#include <ATen/ops/softshrink_backward.h>
#include <ATen/ops/softshrink_backward.h>
#include <ATen/ops/adaptive_avg_pool2d.h>
#include <ATen/ops/adaptive_avg_pool2d.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/_adaptive_avg_pool2d.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/adaptive_avg_pool3d.h>
#include <ATen/ops/adaptive_avg_pool3d.h>
#include <ATen/ops/_adaptive_avg_pool3d.h>
#include <ATen/ops/adaptive_avg_pool3d_backward.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward.h>
#include <ATen/ops/adaptive_max_pool2d.h>
#include <ATen/ops/adaptive_max_pool2d.h>
#include <ATen/ops/adaptive_max_pool2d_backward.h>
#include <ATen/ops/adaptive_max_pool2d_backward.h>
#include <ATen/ops/adaptive_max_pool3d.h>
#include <ATen/ops/adaptive_max_pool3d.h>
#include <ATen/ops/adaptive_max_pool3d_backward.h>
#include <ATen/ops/adaptive_max_pool3d_backward.h>
#include <ATen/ops/avg_pool2d.h>
#include <ATen/ops/avg_pool2d.h>
#include <ATen/ops/avg_pool2d_backward.h>
#include <ATen/ops/avg_pool2d_backward.h>
#include <ATen/ops/avg_pool3d.h>
#include <ATen/ops/avg_pool3d.h>
#include <ATen/ops/avg_pool3d_backward.h>
#include <ATen/ops/avg_pool3d_backward.h>
#include <ATen/ops/fractional_max_pool2d.h>
#include <ATen/ops/fractional_max_pool2d.h>
#include <ATen/ops/fractional_max_pool2d_backward.h>
#include <ATen/ops/fractional_max_pool2d_backward.h>
#include <ATen/ops/fractional_max_pool3d.h>
#include <ATen/ops/fractional_max_pool3d.h>
#include <ATen/ops/fractional_max_pool3d_backward.h>
#include <ATen/ops/fractional_max_pool3d_backward.h>
#include <ATen/ops/max_pool2d_with_indices.h>
#include <ATen/ops/max_pool2d_with_indices.h>
#include <ATen/ops/max_pool2d_with_indices_backward.h>
#include <ATen/ops/max_pool2d_with_indices_backward.h>
#include <ATen/ops/max_pool3d_with_indices.h>
#include <ATen/ops/max_pool3d_with_indices.h>
#include <ATen/ops/max_pool3d_with_indices_backward.h>
#include <ATen/ops/max_pool3d_with_indices_backward.h>
#include <ATen/ops/max_unpool2d.h>
#include <ATen/ops/max_unpool2d.h>
#include <ATen/ops/max_unpool3d.h>
#include <ATen/ops/max_unpool3d.h>
#include <ATen/ops/reflection_pad1d.h>
#include <ATen/ops/reflection_pad1d.h>
#include <ATen/ops/reflection_pad1d_backward.h>
#include <ATen/ops/reflection_pad1d_backward.h>
#include <ATen/ops/reflection_pad2d.h>
#include <ATen/ops/reflection_pad2d.h>
#include <ATen/ops/reflection_pad2d_backward.h>
#include <ATen/ops/reflection_pad2d_backward.h>
#include <ATen/ops/reflection_pad3d.h>
#include <ATen/ops/reflection_pad3d.h>
#include <ATen/ops/reflection_pad3d_backward.h>
#include <ATen/ops/reflection_pad3d_backward.h>
#include <ATen/ops/replication_pad1d.h>
#include <ATen/ops/replication_pad1d.h>
#include <ATen/ops/replication_pad1d_backward.h>
#include <ATen/ops/replication_pad1d_backward.h>
#include <ATen/ops/replication_pad2d.h>
#include <ATen/ops/replication_pad2d.h>
#include <ATen/ops/replication_pad2d_backward.h>
#include <ATen/ops/replication_pad2d_backward.h>
#include <ATen/ops/replication_pad3d.h>
#include <ATen/ops/replication_pad3d.h>
#include <ATen/ops/replication_pad3d_backward.h>
#include <ATen/ops/replication_pad3d_backward.h>
#include <ATen/ops/_pad_circular.h>
#include <ATen/ops/_pad_enum.h>
#include <ATen/ops/pad.h>
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/upsample_trilinear3d.h>
#include <ATen/ops/upsample_bicubic2d.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/upsample_nearest1d.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_linear1d.h>
#include <ATen/ops/upsample_linear1d_backward.h>
#include <ATen/ops/upsample_linear1d_backward.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/upsample_bilinear2d_backward.h>
#include <ATen/ops/upsample_bilinear2d_backward.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/_upsample_bilinear2d_aa.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward.h>
#include <ATen/ops/upsample_bicubic2d.h>
#include <ATen/ops/upsample_bicubic2d.h>
#include <ATen/ops/upsample_bicubic2d_backward.h>
#include <ATen/ops/upsample_bicubic2d_backward.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/_upsample_bicubic2d_aa.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward.h>
#include <ATen/ops/upsample_trilinear3d.h>
#include <ATen/ops/upsample_trilinear3d.h>
#include <ATen/ops/upsample_trilinear3d_backward.h>
#include <ATen/ops/upsample_trilinear3d_backward.h>
#include <ATen/ops/upsample_nearest1d.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/upsample_nearest1d.h>
#include <ATen/ops/_upsample_nearest_exact1d.h>
#include <ATen/ops/upsample_nearest1d_backward.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward.h>
#include <ATen/ops/upsample_nearest1d_backward.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/_upsample_nearest_exact2d.h>
#include <ATen/ops/upsample_nearest2d_backward.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward.h>
#include <ATen/ops/upsample_nearest2d_backward.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/upsample_nearest3d.h>
#include <ATen/ops/_upsample_nearest_exact3d.h>
#include <ATen/ops/upsample_nearest3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/upsample_nearest3d_backward.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward.h>
#include <ATen/ops/sigmoid_backward.h>
#include <ATen/ops/sigmoid_backward.h>
#include <ATen/ops/logit_backward.h>
#include <ATen/ops/logit_backward.h>
#include <ATen/ops/tanh_backward.h>
#include <ATen/ops/tanh_backward.h>
#include <ATen/ops/slow_conv_transpose2d.h>
#include <ATen/ops/slow_conv_transpose2d.h>
#include <ATen/ops/slow_conv_transpose3d.h>
#include <ATen/ops/slow_conv_transpose3d.h>
#include <ATen/ops/thnn_conv2d.h>
#include <ATen/ops/thnn_conv2d.h>
#include <ATen/ops/_slow_conv2d_forward.h>
#include <ATen/ops/_slow_conv2d_forward.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/_conv_depthwise2d.h>
#include <ATen/ops/_conv_depthwise2d.h>
#include <ATen/ops/conv_depthwise3d.h>
#include <ATen/ops/slow_conv3d.h>
#include <ATen/ops/slow_conv3d.h>
#include <ATen/ops/slow_conv3d_forward.h>
#include <ATen/ops/slow_conv3d_forward.h>
#include <ATen/ops/slow_conv_dilated2d.h>
#include <ATen/ops/slow_conv_dilated3d.h>
#include <ATen/ops/col2im.h>
#include <ATen/ops/col2im.h>
#include <ATen/ops/column_stack.h>
#include <ATen/ops/column_stack.h>
#include <ATen/ops/im2col.h>
#include <ATen/ops/im2col.h>
#include <ATen/ops/isfinite.h>
#include <ATen/ops/isinf.h>
#include <ATen/ops/record_stream.h>
#include <ATen/ops/isposinf.h>
#include <ATen/ops/isposinf.h>
#include <ATen/ops/isneginf.h>
#include <ATen/ops/isneginf.h>
#include <ATen/ops/_add_batch_dim.h>
#include <ATen/ops/_remove_batch_dim.h>
#include <ATen/ops/special_entr.h>
#include <ATen/ops/special_entr.h>
#include <ATen/ops/special_ndtri.h>
#include <ATen/ops/special_ndtri.h>
#include <ATen/ops/special_log_ndtr.h>
#include <ATen/ops/special_log_ndtr.h>
#include <ATen/ops/special_expm1.h>
#include <ATen/ops/special_expm1.h>
#include <ATen/ops/special_exp2.h>
#include <ATen/ops/special_exp2.h>
#include <ATen/ops/special_psi.h>
#include <ATen/ops/special_psi.h>
#include <ATen/ops/special_digamma.h>
#include <ATen/ops/special_digamma.h>
#include <ATen/ops/special_gammaln.h>
#include <ATen/ops/special_gammaln.h>
#include <ATen/ops/special_erf.h>
#include <ATen/ops/special_erf.h>
#include <ATen/ops/special_erfc.h>
#include <ATen/ops/special_erfc.h>
#include <ATen/ops/special_erfcx.h>
#include <ATen/ops/special_erfcx.h>
#include <ATen/ops/special_erfinv.h>
#include <ATen/ops/special_erfinv.h>
#include <ATen/ops/special_ndtr.h>
#include <ATen/ops/special_ndtr.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlog1py.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_xlogy.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_zeta.h>
#include <ATen/ops/special_i0.h>
#include <ATen/ops/special_i0.h>
#include <ATen/ops/special_i0e.h>
#include <ATen/ops/special_i0e.h>
#include <ATen/ops/special_i1.h>
#include <ATen/ops/special_i1.h>
#include <ATen/ops/special_i1e.h>
#include <ATen/ops/special_i1e.h>
#include <ATen/ops/special_logit.h>
#include <ATen/ops/special_logit.h>
#include <ATen/ops/special_polygamma.h>
#include <ATen/ops/special_polygamma.h>
#include <ATen/ops/special_logsumexp.h>
#include <ATen/ops/special_logsumexp.h>
#include <ATen/ops/special_expit.h>
#include <ATen/ops/special_expit.h>
#include <ATen/ops/special_sinc.h>
#include <ATen/ops/special_sinc.h>
#include <ATen/ops/special_round.h>
#include <ATen/ops/special_round.h>
#include <ATen/ops/special_log1p.h>
#include <ATen/ops/special_log1p.h>
#include <ATen/ops/special_log_softmax.h>
#include <ATen/ops/special_gammainc.h>
#include <ATen/ops/special_gammainc.h>
#include <ATen/ops/special_gammaincc.h>
#include <ATen/ops/special_gammaincc.h>
#include <ATen/ops/special_multigammaln.h>
#include <ATen/ops/special_multigammaln.h>
#include <ATen/ops/special_softmax.h>
#include <ATen/ops/fft_fft.h>
#include <ATen/ops/fft_fft.h>
#include <ATen/ops/fft_ifft.h>
#include <ATen/ops/fft_ifft.h>
#include <ATen/ops/fft_rfft.h>
#include <ATen/ops/fft_rfft.h>
#include <ATen/ops/fft_irfft.h>
#include <ATen/ops/fft_irfft.h>
#include <ATen/ops/fft_hfft.h>
#include <ATen/ops/fft_hfft.h>
#include <ATen/ops/fft_ihfft.h>
#include <ATen/ops/fft_ihfft.h>
#include <ATen/ops/fft_fft2.h>
#include <ATen/ops/fft_fft2.h>
#include <ATen/ops/fft_ifft2.h>
#include <ATen/ops/fft_ifft2.h>
#include <ATen/ops/fft_rfft2.h>
#include <ATen/ops/fft_rfft2.h>
#include <ATen/ops/fft_irfft2.h>
#include <ATen/ops/fft_irfft2.h>
#include <ATen/ops/fft_hfft2.h>
#include <ATen/ops/fft_hfft2.h>
#include <ATen/ops/fft_ihfft2.h>
#include <ATen/ops/fft_ihfft2.h>
#include <ATen/ops/fft_fftn.h>
#include <ATen/ops/fft_fftn.h>
#include <ATen/ops/fft_ifftn.h>
#include <ATen/ops/fft_ifftn.h>
#include <ATen/ops/fft_rfftn.h>
#include <ATen/ops/fft_rfftn.h>
#include <ATen/ops/fft_irfftn.h>
#include <ATen/ops/fft_irfftn.h>
#include <ATen/ops/fft_hfftn.h>
#include <ATen/ops/fft_hfftn.h>
#include <ATen/ops/fft_ihfftn.h>
#include <ATen/ops/fft_ihfftn.h>
#include <ATen/ops/fft_fftfreq.h>
#include <ATen/ops/fft_fftfreq.h>
#include <ATen/ops/fft_rfftfreq.h>
#include <ATen/ops/fft_rfftfreq.h>
#include <ATen/ops/fft_fftshift.h>
#include <ATen/ops/fft_ifftshift.h>
#include <ATen/ops/linalg_cholesky_ex.h>
#include <ATen/ops/linalg_cholesky_ex.h>
#include <ATen/ops/linalg_cholesky.h>
#include <ATen/ops/linalg_cholesky.h>
#include <ATen/ops/linalg_cross.h>
#include <ATen/ops/linalg_cross.h>
#include <ATen/ops/linalg_lu_factor.h>
#include <ATen/ops/linalg_lu_factor.h>
#include <ATen/ops/linalg_lu_factor_ex.h>
#include <ATen/ops/linalg_lu_factor_ex.h>
#include <ATen/ops/linalg_lu.h>
#include <ATen/ops/linalg_lu.h>
#include <ATen/ops/linalg_lu_solve.h>
#include <ATen/ops/linalg_lu_solve.h>
#include <ATen/ops/_linalg_det.h>
#include <ATen/ops/_linalg_det.h>
#include <ATen/ops/linalg_det.h>
#include <ATen/ops/linalg_det.h>
#include <ATen/ops/det.h>
#include <ATen/ops/linalg_ldl_factor_ex.h>
#include <ATen/ops/linalg_ldl_factor_ex.h>
#include <ATen/ops/linalg_ldl_factor.h>
#include <ATen/ops/linalg_ldl_factor.h>
#include <ATen/ops/linalg_ldl_solve.h>
#include <ATen/ops/linalg_ldl_solve.h>
#include <ATen/ops/linalg_lstsq.h>
#include <ATen/ops/linalg_lstsq.h>
#include <ATen/ops/linalg_matmul.h>
#include <ATen/ops/linalg_matmul.h>
#include <ATen/ops/linalg_vecdot.h>
#include <ATen/ops/linalg_vecdot.h>
#include <ATen/ops/linalg_matrix_exp.h>
#include <ATen/ops/_linalg_slogdet.h>
#include <ATen/ops/_linalg_slogdet.h>
#include <ATen/ops/linalg_slogdet.h>
#include <ATen/ops/linalg_slogdet.h>
#include <ATen/ops/slogdet.h>
#include <ATen/ops/slogdet.h>
#include <ATen/ops/logdet.h>
#include <ATen/ops/linalg_eig.h>
#include <ATen/ops/linalg_eig.h>
#include <ATen/ops/_linalg_eigvals.h>
#include <ATen/ops/linalg_eigvals.h>
#include <ATen/ops/linalg_eigvals.h>
#include <ATen/ops/_linalg_eigh.h>
#include <ATen/ops/_linalg_eigh.h>
#include <ATen/ops/linalg_eigh.h>
#include <ATen/ops/linalg_eigh.h>
#include <ATen/ops/linalg_eigvalsh.h>
#include <ATen/ops/linalg_eigvalsh.h>
#include <ATen/ops/linalg_householder_product.h>
#include <ATen/ops/linalg_householder_product.h>
#include <ATen/ops/linalg_inv_ex.h>
#include <ATen/ops/linalg_inv_ex.h>
#include <ATen/ops/linalg_inv.h>
#include <ATen/ops/linalg_inv.h>
#include <ATen/ops/inverse.h>
#include <ATen/ops/inverse.h>
#include <ATen/ops/inner.h>
#include <ATen/ops/inner.h>
#include <ATen/ops/outer.h>
#include <ATen/ops/outer.h>
#include <ATen/ops/ger.h>
#include <ATen/ops/ger.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_norm.h>
#include <ATen/ops/linalg_vector_norm.h>
#include <ATen/ops/linalg_vector_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/linalg_matrix_norm.h>
#include <ATen/ops/_linalg_svd.h>
#include <ATen/ops/_linalg_svd.h>
#include <ATen/ops/linalg_svd.h>
#include <ATen/ops/linalg_svd.h>
#include <ATen/ops/linalg_svdvals.h>
#include <ATen/ops/linalg_svdvals.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_cond.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/linalg_pinv.h>
#include <ATen/ops/_linalg_solve_ex.h>
#include <ATen/ops/_linalg_solve_ex.h>
#include <ATen/ops/linalg_solve_ex.h>
#include <ATen/ops/linalg_solve_ex.h>
#include <ATen/ops/linalg_solve.h>
#include <ATen/ops/_spsolve.h>
#include <ATen/ops/linalg_solve.h>
#include <ATen/ops/linalg_tensorinv.h>
#include <ATen/ops/linalg_tensorinv.h>
#include <ATen/ops/linalg_tensorsolve.h>
#include <ATen/ops/linalg_tensorsolve.h>
#include <ATen/ops/linalg_qr.h>
#include <ATen/ops/linalg_qr.h>
#include <ATen/ops/linalg_matrix_power.h>
#include <ATen/ops/linalg_matrix_power.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_matrix_rank.h>
#include <ATen/ops/linalg_multi_dot.h>
#include <ATen/ops/linalg_multi_dot.h>
#include <ATen/ops/nested_to_padded_tensor.h>
#include <ATen/ops/_test_serialization_subcmul.h>
#include <ATen/ops/_test_parallel_materialize.h>
#include <ATen/ops/_test_optional_intlist.h>
#include <ATen/ops/_test_optional_filled_intlist.h>
#include <ATen/ops/_test_optional_floatlist.h>
#include <ATen/ops/_test_string_default.h>
#include <ATen/ops/_test_ambiguous_defaults.h>
#include <ATen/ops/_test_ambiguous_defaults.h>
#include <ATen/ops/_test_warn_in_autograd.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
#include <ATen/ops/segment_reduce.h>
#include <ATen/ops/_segment_reduce_backward.h>
#include <ATen/ops/pad_sequence.h>
#include <ATen/ops/flatten_dense_tensors.h>
#include <ATen/ops/unflatten_dense_tensors.h>
#include <ATen/ops/_nested_tensor_from_tensor_list.h>
#include <ATen/ops/_fw_primal_copy.h>
#include <ATen/ops/_make_dual_copy.h>
#include <ATen/ops/view_as_real_copy.h>
#include <ATen/ops/view_as_complex_copy.h>
#include <ATen/ops/_conj_copy.h>
#include <ATen/ops/_neg_view_copy.h>
#include <ATen/ops/as_strided_copy.h>
#include <ATen/ops/_sparse_broadcast_to_copy.h>
#include <ATen/ops/diagonal_copy.h>
#include <ATen/ops/expand_copy.h>
#include <ATen/ops/permute_copy.h>
#include <ATen/ops/_reshape_alias_copy.h>
#include <ATen/ops/select_copy.h>
#include <ATen/ops/detach_copy.h>
#include <ATen/ops/slice_copy.h>
#include <ATen/ops/split_copy.h>
#include <ATen/ops/split_with_sizes_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/t_copy.h>
#include <ATen/ops/transpose_copy.h>
#include <ATen/ops/unsqueeze_copy.h>
#include <ATen/ops/_indices_copy.h>
#include <ATen/ops/_values_copy.h>
#include <ATen/ops/indices_copy.h>
#include <ATen/ops/values_copy.h>
#include <ATen/ops/crow_indices_copy.h>
#include <ATen/ops/col_indices_copy.h>
#include <ATen/ops/ccol_indices_copy.h>
#include <ATen/ops/row_indices_copy.h>
#include <ATen/ops/unbind_copy.h>
#include <ATen/ops/unbind_copy.h>
#include <ATen/ops/split_copy.h>
#include <ATen/ops/split_with_sizes_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/unfold_copy.h>
#include <ATen/ops/alias_copy.h>
#include <ATen/ops/to_padded_tensor.h>
#include <ATen/ops/_jagged_to_padded_dense_forward.h>
#include <ATen/ops/_padded_dense_to_jagged_forward.h>
#include <ATen/ops/_nested_from_padded_tensor.h>
#include <ATen/ops/_nested_tensor_softmax_with_shape.h>
#include <ATen/ops/_safe_softmax.h>
#include <ATen/ops/_transformer_encoder_layer_fwd.h>
#include <ATen/ops/_native_multi_head_attention.h>
#include <ATen/ops/scaled_dot_product_attention.h>
#include <ATen/ops/_fused_sdp_choice.h>
#include <ATen/ops/_scaled_dot_product_attention_math.h>
#include <ATen/ops/_scaled_dot_product_attention_math_for_mps.h>
#include <ATen/ops/_scaled_dot_product_flash_attention.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_backward.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward.h>
#include <ATen/ops/_flash_attention_forward.h>
#include <ATen/ops/_flash_attention_backward.h>
#include <ATen/ops/_efficient_attention_forward.h>
#include <ATen/ops/_efficient_attention_backward.h>
#include <ATen/ops/_cudnn_attention_forward.h>
#include <ATen/ops/_triton_scaled_dot_attention.h>
#include <ATen/ops/_fill_mem_eff_dropout_mask.h>
#include <ATen/ops/_triton_multi_head_attention.h>
#include <ATen/ops/special_airy_ai.h>
#include <ATen/ops/special_airy_ai.h>
#include <ATen/ops/special_bessel_j0.h>
#include <ATen/ops/special_bessel_j0.h>
#include <ATen/ops/special_bessel_j1.h>
#include <ATen/ops/special_bessel_j1.h>
#include <ATen/ops/special_bessel_y0.h>
#include <ATen/ops/special_bessel_y0.h>
#include <ATen/ops/special_bessel_y1.h>
#include <ATen/ops/special_bessel_y1.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_t.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_u.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_v.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_chebyshev_polynomial_w.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_h.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_hermite_polynomial_he.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_laguerre_polynomial_l.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_legendre_polynomial_p.h>
#include <ATen/ops/special_modified_bessel_i0.h>
#include <ATen/ops/special_modified_bessel_i0.h>
#include <ATen/ops/special_modified_bessel_i1.h>
#include <ATen/ops/special_modified_bessel_i1.h>
#include <ATen/ops/special_modified_bessel_k0.h>
#include <ATen/ops/special_modified_bessel_k0.h>
#include <ATen/ops/special_modified_bessel_k1.h>
#include <ATen/ops/special_modified_bessel_k1.h>
#include <ATen/ops/special_scaled_modified_bessel_k0.h>
#include <ATen/ops/special_scaled_modified_bessel_k0.h>
#include <ATen/ops/special_scaled_modified_bessel_k1.h>
#include <ATen/ops/special_scaled_modified_bessel_k1.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w.h>
#include <ATen/ops/special_spherical_bessel_j0.h>
#include <ATen/ops/special_spherical_bessel_j0.h>
#include <ATen/ops/_foobar.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_adagrad.h>
#include <ATen/ops/_propagate_xla_data.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta.h>
#include <ATen/ops/_cudnn_ctc_loss.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight.h>
#include <ATen/ops/_cudnn_rnn.h>
#include <ATen/ops/_cudnn_rnn_backward.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/_fused_dropout.h>
#include <ATen/ops/_masked_scale.h>
#include <ATen/ops/native_dropout.h>
#include <ATen/ops/native_dropout_backward.h>
#include <ATen/ops/_conj_physical.h>
#include <ATen/ops/avg_pool1d.h>
#include <ATen/ops/adaptive_avg_pool1d.h>
#include <ATen/ops/_add_relu.h>
#include <ATen/ops/add.h>
#include <ATen/ops/affine_grid_generator.h>
#include <ATen/ops/_test_functorch_fallback.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/quantized_batch_norm.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/bernoulli.h>
#include <ATen/ops/binary_cross_entropy_with_logits.h>
#include <ATen/ops/bincount.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/block_diag.h>
#include <ATen/ops/constant_pad_nd.h>
#include <ATen/ops/convolution.h>
#include <ATen/ops/convolution_backward.h>
#include <ATen/ops/convolution_overrideable.h>
#include <ATen/ops/convolution_backward_overrideable.h>
#include <ATen/ops/_convolution.h>
#include <ATen/ops/conv_tbc.h>
#include <ATen/ops/copy.h>
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_copy_from_and_resize.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/count_nonzero.h>
#include <ATen/ops/cudnn_affine_grid_generator.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward.h>
#include <ATen/ops/cudnn_batch_norm.h>
#include <ATen/ops/cudnn_batch_norm_backward.h>
#include <ATen/ops/cudnn_convolution_transpose.h>
#include <ATen/ops/_mps_convolution_transpose.h>
#include <ATen/ops/mps_convolution_transpose_backward.h>
#include <ATen/ops/cudnn_convolution_relu.h>
#include <ATen/ops/cudnn_convolution_add_relu.h>
#include <ATen/ops/cudnn_grid_sampler.h>
#include <ATen/ops/cudnn_grid_sampler_backward.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss.h>
#include <ATen/ops/_ctc_loss_backward.h>
#include <ATen/ops/diag_embed.h>
#include <ATen/ops/diagonal_backward.h>
#include <ATen/ops/div.h>
#include <ATen/ops/div.h>
#include <ATen/ops/embedding.h>
#include <ATen/ops/embedding_dense_backward.h>
#include <ATen/ops/embedding_renorm.h>
#include <ATen/ops/embedding_renorm.h>
#include <ATen/ops/_embedding_bag_forward_only.h>
#include <ATen/ops/_embedding_bag.h>
#include <ATen/ops/_embedding_bag_dense_backward.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_permuted.h>
#include <ATen/ops/new_empty.h>
#include <ATen/ops/new_empty_strided.h>
#include <ATen/ops/new_full.h>
#include <ATen/ops/new_zeros.h>
#include <ATen/ops/new_ones.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/resize.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/_resize_output.h>
#include <ATen/ops/empty_quantized.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/fill.h>
#include <ATen/ops/floor_divide.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full_like.h>
#include <ATen/ops/from_file.h>
#include <ATen/ops/grid_sampler_2d.h>
#include <ATen/ops/grid_sampler_2d_backward.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback.h>
#include <ATen/ops/grid_sampler_3d.h>
#include <ATen/ops/grid_sampler_3d_backward.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/native_group_norm.h>
#include <ATen/ops/native_group_norm_backward.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/_index_put_impl.h>
#include <ATen/ops/isnan.h>
#include <ATen/ops/native_layer_norm.h>
#include <ATen/ops/native_layer_norm_backward.h>
#include <ATen/ops/linear_backward.h>
#include <ATen/ops/mkldnn_linear.h>
#include <ATen/ops/mkldnn_linear_backward_input.h>
#include <ATen/ops/mkldnn_linear_backward_weights.h>
#include <ATen/ops/mkldnn_linear_backward.h>
#include <ATen/ops/matmul_backward.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/_aminmax.h>
#include <ATen/ops/max_pool2d_backward.h>
#include <ATen/ops/mkldnn_max_pool2d.h>
#include <ATen/ops/mkldnn_max_pool2d_backward.h>
#include <ATen/ops/mkldnn_max_pool3d.h>
#include <ATen/ops/mkldnn_max_pool3d_backward.h>
#include <ATen/ops/quantized_max_pool1d.h>
#include <ATen/ops/quantized_max_pool2d.h>
#include <ATen/ops/quantized_max_pool3d.h>
#include <ATen/ops/median.h>
#include <ATen/ops/nanmedian.h>
#include <ATen/ops/_mps_convolution.h>
#include <ATen/ops/mps_convolution_backward.h>
#include <ATen/ops/mkldnn_convolution.h>
#include <ATen/ops/mkldnn_rnn_layer.h>
#include <ATen/ops/mkldnn_rnn_layer_backward.h>
#include <ATen/ops/miopen_batch_norm.h>
#include <ATen/ops/miopen_batch_norm_backward.h>
#include <ATen/ops/miopen_convolution.h>
#include <ATen/ops/miopen_convolution_transpose.h>
#include <ATen/ops/miopen_depthwise_convolution.h>
#include <ATen/ops/miopen_rnn.h>
#include <ATen/ops/miopen_rnn_backward.h>
#include <ATen/ops/_sparse_sparse_matmul.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/_native_batch_norm_legit.h>
#include <ATen/ops/_native_batch_norm_legit_no_training.h>
#include <ATen/ops/batch_norm_stats.h>
#include <ATen/ops/batch_norm_gather_stats.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts.h>
#include <ATen/ops/native_batch_norm_backward.h>
#include <ATen/ops/batch_norm_backward_reduce.h>
#include <ATen/ops/batch_norm_backward_elemt.h>
#include <ATen/ops/batch_norm_update_stats.h>
#include <ATen/ops/_nnpack_spatial_convolution.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones_like.h>
#include <ATen/ops/_euclidean_dist.h>
#include <ATen/ops/_cdist_forward.h>
#include <ATen/ops/_cdist_backward.h>
#include <ATen/ops/_pdist_forward.h>
#include <ATen/ops/_pdist_backward.h>
#include <ATen/ops/pixel_shuffle.h>
#include <ATen/ops/pixel_unshuffle.h>
#include <ATen/ops/channel_shuffle.h>
#include <ATen/ops/_pin_memory.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn_like.h>
#include <ATen/ops/repeat.h>
#include <ATen/ops/repeat_interleave.h>
#include <ATen/ops/_mkldnn_reshape.h>
#include <ATen/ops/relu.h>
#include <ATen/ops/select_backward.h>
#include <ATen/ops/celu.h>
#include <ATen/ops/slice_backward.h>
#include <ATen/ops/slice_scatter.h>
#include <ATen/ops/select_scatter.h>
#include <ATen/ops/diagonal_scatter.h>
#include <ATen/ops/as_strided_scatter.h>
#include <ATen/ops/unsafe_split.h>
#include <ATen/ops/unsafe_split_with_sizes.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/std_mean.h>
#include <ATen/ops/prod.h>
#include <ATen/ops/_mkldnn_transpose.h>
#include <ATen/ops/flip.h>
#include <ATen/ops/roll.h>
#include <ATen/ops/rot90.h>
#include <ATen/ops/_transform_bias_rescale_qkv.h>
#include <ATen/ops/_nested_tensor_from_mask.h>
#include <ATen/ops/_nested_from_padded.h>
#include <ATen/ops/_nested_tensor_size.h>
#include <ATen/ops/_nested_tensor_strides.h>
#include <ATen/ops/_nested_tensor_storage_offsets.h>
#include <ATen/ops/_nested_from_padded_and_nested_example.h>
#include <ATen/ops/_nested_view_from_buffer_copy.h>
#include <ATen/ops/_nested_view_from_jagged_copy.h>
#include <ATen/ops/_nested_get_values_copy.h>
#include <ATen/ops/_trilinear.h>
#include <ATen/ops/_unique.h>
#include <ATen/ops/unique_dim.h>
#include <ATen/ops/unique_consecutive.h>
#include <ATen/ops/unique_dim_consecutive.h>
#include <ATen/ops/_unique2.h>
#include <ATen/ops/_unsafe_view.h>
#include <ATen/ops/var_mean.h>
#include <ATen/ops/_weight_norm_interface.h>
#include <ATen/ops/_weight_norm_interface_backward.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/_standard_gamma_grad.h>
#include <ATen/ops/_standard_gamma.h>
#include <ATen/ops/_dirichlet_grad.h>
#include <ATen/ops/_sample_dirichlet.h>
#include <ATen/ops/poisson.h>
#include <ATen/ops/binomial.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/native_norm.h>
#include <ATen/ops/_batch_norm_with_update.h>
#include <ATen/ops/_batch_norm_no_update.h>
#include <ATen/ops/_sparse_sum.h>
#include <ATen/ops/_sparse_sum_backward.h>
#include <ATen/ops/_sparse_csr_sum.h>
#include <ATen/ops/_sparse_csr_prod.h>
#include <ATen/ops/_sparse_softmax.h>
#include <ATen/ops/_sparse_softmax_backward_data.h>
#include <ATen/ops/_sparse_log_softmax.h>
#include <ATen/ops/_sparse_log_softmax_backward_data.h>
#include <ATen/ops/_spdiags.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/norm.h>
#include <ATen/ops/clone.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/resize_as_sparse.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/zero.h>
#include <ATen/ops/sub.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/rsub.h>
#include <ATen/ops/_sparse_addmm.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_resize.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/sparse_resize_and_clear.h>
#include <ATen/ops/sparse_mask.h>
#include <ATen/ops/_sparse_mask_projection.h>
#include <ATen/ops/_to_dense.h>
#include <ATen/ops/_coalesce.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/_coalesced.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/_to_sparse.h>
#include <ATen/ops/_to_sparse.h>
#include <ATen/ops/_to_sparse_csr.h>
#include <ATen/ops/_to_sparse_csc.h>
#include <ATen/ops/_to_sparse_bsr.h>
#include <ATen/ops/_to_sparse_bsc.h>
#include <ATen/ops/to_mkldnn.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight.h>
#include <ATen/ops/quantize_per_tensor_dynamic.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_tensor.h>
#include <ATen/ops/quantize_per_channel.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/dequantize.h>
#include <ATen/ops/q_per_channel_scales.h>
#include <ATen/ops/q_per_channel_zero_points.h>
#include <ATen/ops/int_repr.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
#include <ATen/ops/_make_per_channel_quantized_tensor.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/_lstm_mps.h>
#include <ATen/ops/lstm_mps_backward.h>
#include <ATen/ops/_thnn_fused_lstm_cell.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl.h>
#include <ATen/ops/_thnn_fused_gru_cell.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward.h>
#include <ATen/ops/_pack_padded_sequence.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/set.h>
#include <ATen/ops/lift.h>
#include <ATen/ops/lift_fresh_copy.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_fill.h>
#include <ATen/ops/masked_scatter.h>
#include <ATen/ops/_masked_softmax.h>
#include <ATen/ops/_masked_softmax_backward.h>
#include <ATen/ops/put.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/index_fill.h>
#include <ATen/ops/bitwise_and.h>
#include <ATen/ops/bitwise_or.h>
#include <ATen/ops/bitwise_xor.h>
#include <ATen/ops/lshift.h>
#include <ATen/ops/lshift.h>
#include <ATen/ops/bitwise_left_shift.h>
#include <ATen/ops/rshift.h>
#include <ATen/ops/rshift.h>
#include <ATen/ops/bitwise_right_shift.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/random.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/uniform.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/cauchy.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/log_normal.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/exponential.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/geometric.h>
#include <ATen/ops/tril_indices.h>
#include <ATen/ops/triu_indices.h>
#include <ATen/ops/trace.h>
#include <ATen/ops/_cholesky_solve_helper.h>
#include <ATen/ops/dist.h>
#include <ATen/ops/_histogramdd_bin_edges.h>
#include <ATen/ops/_histogramdd_from_bin_cts.h>
#include <ATen/ops/_histogramdd_from_bin_tensors.h>
#include <ATen/ops/remainder.h>
#include <ATen/ops/unfold_backward.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_amp_update_scale.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_add.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_sub.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_mul.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_div.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_max.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_clamp_min.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_maximum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_minimum.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcdiv.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_addcmul.h>
#include <ATen/ops/_foreach_abs.h>
#include <ATen/ops/_foreach_acos.h>
#include <ATen/ops/_foreach_asin.h>
#include <ATen/ops/_foreach_atan.h>
#include <ATen/ops/_foreach_ceil.h>
#include <ATen/ops/_foreach_cos.h>
#include <ATen/ops/_foreach_cosh.h>
#include <ATen/ops/_foreach_erf.h>
#include <ATen/ops/_foreach_erfc.h>
#include <ATen/ops/_foreach_exp.h>
#include <ATen/ops/_foreach_expm1.h>
#include <ATen/ops/_foreach_floor.h>
#include <ATen/ops/_foreach_frac.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lerp.h>
#include <ATen/ops/_foreach_lgamma.h>
#include <ATen/ops/_foreach_log.h>
#include <ATen/ops/_foreach_log10.h>
#include <ATen/ops/_foreach_log1p.h>
#include <ATen/ops/_foreach_log2.h>
#include <ATen/ops/_foreach_max.h>
#include <ATen/ops/_foreach_neg.h>
#include <ATen/ops/_foreach_norm.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_pow.h>
#include <ATen/ops/_foreach_reciprocal.h>
#include <ATen/ops/_foreach_round.h>
#include <ATen/ops/_foreach_rsqrt.h>
#include <ATen/ops/_foreach_sigmoid.h>
#include <ATen/ops/_foreach_sign.h>
#include <ATen/ops/_foreach_sin.h>
#include <ATen/ops/_foreach_sinh.h>
#include <ATen/ops/_foreach_sqrt.h>
#include <ATen/ops/_foreach_tan.h>
#include <ATen/ops/_foreach_tanh.h>
#include <ATen/ops/_foreach_trunc.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_foreach_zero.h>
#include <ATen/ops/_foreach_copy.h>
#include <ATen/ops/bucketize.h>
#include <ATen/ops/glu_jvp.h>
#include <ATen/ops/glu_backward_jvp.h>
#include <ATen/ops/hardswish_backward.h>
#include <ATen/ops/rrelu_with_noise.h>
#include <ATen/ops/rrelu_with_noise_backward.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/_adaptive_avg_pool2d.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward.h>
#include <ATen/ops/_adaptive_avg_pool3d.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward.h>
#include <ATen/ops/upsample_bilinear2d.h>
#include <ATen/ops/upsample_nearest2d.h>
#include <ATen/ops/_slow_conv2d_backward.h>
#include <ATen/ops/conv_depthwise3d.h>
#include <ATen/ops/slow_conv_dilated2d.h>
#include <ATen/ops/slow_conv_dilated3d.h>
#include <ATen/ops/isinf.h>
#include <ATen/ops/linalg_matrix_exp.h>
#include <ATen/ops/_test_optional_intlist.h>
#include <ATen/ops/_test_optional_filled_intlist.h>
#include <ATen/ops/_test_optional_floatlist.h>
#include <ATen/ops/_test_warn_in_autograd.h>
#include <ATen/ops/_test_autograd_multiple_dispatch.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy.h>
#include <ATen/ops/segment_reduce.h>
#include <ATen/ops/_segment_reduce_backward.h>
#include <ATen/ops/_nested_tensor_from_tensor_list.h>
#include <ATen/ops/_fw_primal_copy.h>
#include <ATen/ops/_make_dual_copy.h>
#include <ATen/ops/view_as_real_copy.h>
#include <ATen/ops/view_as_complex_copy.h>
#include <ATen/ops/_conj_copy.h>
#include <ATen/ops/_neg_view_copy.h>
#include <ATen/ops/as_strided_copy.h>
#include <ATen/ops/_sparse_broadcast_to_copy.h>
#include <ATen/ops/diagonal_copy.h>
#include <ATen/ops/expand_copy.h>
#include <ATen/ops/permute_copy.h>
#include <ATen/ops/_reshape_alias_copy.h>
#include <ATen/ops/select_copy.h>
#include <ATen/ops/detach_copy.h>
#include <ATen/ops/slice_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/squeeze_copy.h>
#include <ATen/ops/t_copy.h>
#include <ATen/ops/transpose_copy.h>
#include <ATen/ops/unsqueeze_copy.h>
#include <ATen/ops/_indices_copy.h>
#include <ATen/ops/_values_copy.h>
#include <ATen/ops/indices_copy.h>
#include <ATen/ops/values_copy.h>
#include <ATen/ops/crow_indices_copy.h>
#include <ATen/ops/col_indices_copy.h>
#include <ATen/ops/ccol_indices_copy.h>
#include <ATen/ops/row_indices_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/view_copy.h>
#include <ATen/ops/unfold_copy.h>
#include <ATen/ops/alias_copy.h>
#include <ATen/ops/to_padded_tensor.h>
#include <ATen/ops/_transformer_encoder_layer_fwd.h>
#include <ATen/ops/_native_multi_head_attention.h>
#include <ATen/ops/_triton_scaled_dot_attention.h>
#include <ATen/ops/_triton_multi_head_attention.h>
#include <ATen/ops/_foobar.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adam.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_adamw.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_sgd.h>
#include <ATen/ops/_fused_adagrad.h>
#include <ATen/ops/_fused_adagrad.h>
#endif



namespace at { namespace _ops {


// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Byte and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Byte::schema> create__cast_Byte_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Byte::name, _cast_Byte::overload_name)
      .typed<_cast_Byte::schema>();
}

// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Byte::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Byte_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Byte::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Byte_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Char and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Char::schema> create__cast_Char_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Char::name, _cast_Char::overload_name)
      .typed<_cast_Char::schema>();
}

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Char::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Char_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Char::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Char_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Double and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Double::schema> create__cast_Double_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Double::name, _cast_Double::overload_name)
      .typed<_cast_Double::schema>();
}

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Double::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Double_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Double::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Double_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Float and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Float::schema> create__cast_Float_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Float::name, _cast_Float::overload_name)
      .typed<_cast_Float::schema>();
}

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Float::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Float_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Float_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Int and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Int::schema> create__cast_Int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Int::name, _cast_Int::overload_name)
      .typed<_cast_Int::schema>();
}

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Int::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Int_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Int_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Long and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Long::schema> create__cast_Long_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Long::name, _cast_Long::overload_name)
      .typed<_cast_Long::schema>();
}

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Long::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Long_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Long::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Long_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Short and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Short::schema> create__cast_Short_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Short::name, _cast_Short::overload_name)
      .typed<_cast_Short::schema>();
}

// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Short::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Short_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Short::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Short_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
//
// Resolves the dispatcher record for aten::_cast_Half and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_cast_Half::schema> create__cast_Half_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cast_Half::name, _cast_Half::overload_name)
      .typed<_cast_Half::schema>();
}

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Half::call(const at::Tensor & self, bool non_blocking) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__cast_Half_typed_handle();
  return op.call(self, non_blocking);
}

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor _cast_Half::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking) {
  static auto op = create__cast_Half_typed_handle();
  return op.redispatch(dispatchKeySet, self, non_blocking);
}

// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
//
// Resolves the dispatcher record for aten::_backward and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_backward::schema> create__backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_backward::name, _backward::overload_name)
      .typed<_backward::schema>();
}

// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
void _backward::call(const at::Tensor & self, at::TensorList inputs, const ::std::optional<at::Tensor> & gradient, ::std::optional<bool> retain_graph, bool create_graph) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__backward_typed_handle();
  return op.call(self, inputs, gradient, retain_graph, create_graph);
}

// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
void _backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const ::std::optional<at::Tensor> & gradient, ::std::optional<bool> retain_graph, bool create_graph) {
  static auto op = create__backward_typed_handle();
  return op.redispatch(dispatchKeySet, self, inputs, gradient, retain_graph, create_graph);
}

// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
//
// Resolves the dispatcher record for aten::set_data and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<set_data::schema> create_set_data_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_data::name, set_data::overload_name)
      .typed<set_data::schema>();
}

// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
void set_data::call(at::Tensor & self, const at::Tensor & new_data) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create_set_data_typed_handle();
  return op.call(self, new_data);
}

// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
void set_data::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data) {
  static auto op = create_set_data_typed_handle();
  return op.redispatch(dispatchKeySet, self, new_data);
}

// aten::data(Tensor self) -> Tensor
//
// Resolves the dispatcher record for aten::data and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<data::schema> create_data_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(data::name, data::overload_name)
      .typed<data::schema>();
}

// aten::data(Tensor self) -> Tensor
at::Tensor data::call(const at::Tensor & self) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create_data_typed_handle();
  return op.call(self);
}

// aten::data(Tensor self) -> Tensor
at::Tensor data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto op = create_data_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

// aten::is_leaf(Tensor self) -> bool
//
// Resolves the dispatcher record for aten::is_leaf and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<is_leaf::schema> create_is_leaf_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(is_leaf::name, is_leaf::overload_name)
      .typed<is_leaf::schema>();
}

// aten::is_leaf(Tensor self) -> bool
bool is_leaf::call(const at::Tensor & self) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create_is_leaf_typed_handle();
  return op.call(self);
}

// aten::is_leaf(Tensor self) -> bool
bool is_leaf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto op = create_is_leaf_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

// aten::output_nr(Tensor self) -> int
//
// Resolves the dispatcher record for aten::output_nr and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<output_nr::schema> create_output_nr_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(output_nr::name, output_nr::overload_name)
      .typed<output_nr::schema>();
}

// aten::output_nr(Tensor self) -> int
int64_t output_nr::call(const at::Tensor & self) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create_output_nr_typed_handle();
  return op.call(self);
}

// aten::output_nr(Tensor self) -> int
int64_t output_nr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto op = create_output_nr_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

// aten::_version(Tensor self) -> int
//
// Resolves the dispatcher record for aten::_version and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_version::schema> create__version_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_version::name, _version::overload_name)
      .typed<_version::schema>();
}

// aten::_version(Tensor self) -> int
int64_t _version::call(const at::Tensor & self) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create__version_typed_handle();
  return op.call(self);
}

// aten::_version(Tensor self) -> int
int64_t _version::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto op = create__version_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
//
// Resolves the dispatcher record for aten::requires_grad_ and returns a
// handle typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<requires_grad_::schema> create_requires_grad__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(requires_grad_::name, requires_grad_::overload_name)
      .typed<requires_grad_::schema>();
}

// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
at::Tensor & requires_grad_::call(at::Tensor & self, bool requires_grad) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create_requires_grad__typed_handle();
  return op.call(self, requires_grad);
}

// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
at::Tensor & requires_grad_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool requires_grad) {
  static auto op = create_requires_grad__typed_handle();
  return op.redispatch(dispatchKeySet, self, requires_grad);
}

// aten::retain_grad(Tensor(a!) self) -> ()
//
// Resolves the dispatcher record for aten::retain_grad and returns a handle
// typed against this operator's compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<retain_grad::schema> create_retain_grad_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(retain_grad::name, retain_grad::overload_name)
      .typed<retain_grad::schema>();
}

// aten::retain_grad(Tensor(a!) self) -> ()
void retain_grad::call(at::Tensor & self) {
  // Handle lookup runs once; later calls reuse the cached handle.
  static auto op = create_retain_grad_typed_handle();
  return op.call(self);
}

// aten::retain_grad(Tensor(a!) self) -> ()
void retain_grad::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
  static auto op = create_retain_grad_typed_handle();
  return op.redispatch(dispatchKeySet, self);
}

// aten::retains_grad(Tensor self) -> bool
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<retains_grad::schema> create_retains_grad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(retains_grad::name, retains_grad::overload_name)
      .typed<retains_grad::schema>();
}

// aten::retains_grad(Tensor self) -> bool
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
bool retains_grad::call(const at::Tensor & self) {
    
    static auto op = create_retains_grad_typed_handle();
    return op.call(self);
}

// aten::retains_grad(Tensor self) -> bool
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
bool retains_grad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_retains_grad_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fw_primal::schema> create__fw_primal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fw_primal::name, _fw_primal::overload_name)
      .typed<_fw_primal::schema>();
}

// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _fw_primal::call(const at::Tensor & self, int64_t level) {
    
    static auto op = create__fw_primal_typed_handle();
    return op.call(self, level);
}

// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _fw_primal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {
    
    static auto op = create__fw_primal_typed_handle();
    return op.redispatch(dispatchKeySet, self, level);
}

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_make_dual::schema> create__make_dual_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dual::name, _make_dual::overload_name)
      .typed<_make_dual::schema>();
}

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _make_dual::call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    
    static auto op = create__make_dual_typed_handle();
    return op.call(primal, tangent, level);
}

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _make_dual::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    
    static auto op = create__make_dual_typed_handle();
    return op.redispatch(dispatchKeySet, primal, tangent, level);
}

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_unpack_dual::schema> create__unpack_dual_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unpack_dual::name, _unpack_dual::overload_name)
      .typed<_unpack_dual::schema>();
}

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
::std::tuple<at::Tensor,at::Tensor> _unpack_dual::call(const at::Tensor & dual, int64_t level) {
    
    static auto op = create__unpack_dual_typed_handle();
    return op.call(dual, level);
}

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> _unpack_dual::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level) {
    
    static auto op = create__unpack_dual_typed_handle();
    return op.redispatch(dispatchKeySet, dual, level);
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_new_zeros_with_same_feature_meta::schema> create__new_zeros_with_same_feature_meta_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_new_zeros_with_same_feature_meta::name, _new_zeros_with_same_feature_meta::overload_name)
      .typed<_new_zeros_with_same_feature_meta::schema>();
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _new_zeros_with_same_feature_meta::call(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
    
    static auto op = create__new_zeros_with_same_feature_meta_typed_handle();
    return op.call(self, other, self_num_batch_dims);
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _new_zeros_with_same_feature_meta::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
    
    static auto op = create__new_zeros_with_same_feature_meta_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, self_num_batch_dims);
}

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_has_same_storage_numel::schema> create__has_same_storage_numel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_has_same_storage_numel::name, _has_same_storage_numel::overload_name)
      .typed<_has_same_storage_numel::schema>();
}

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
bool _has_same_storage_numel::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create__has_same_storage_numel_typed_handle();
    return op.call(self, other);
}

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
bool _has_same_storage_numel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create__has_same_storage_numel_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<rename_::schema> create_rename__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rename_::name, rename_::overload_name)
      .typed<rename_::schema>();
}

// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor & rename_::call(at::Tensor & self, ::std::optional<at::DimnameList> names) {
    
    static auto op = create_rename__typed_handle();
    return op.call(self, names);
}

// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor & rename_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<at::DimnameList> names) {
    
    static auto op = create_rename__typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<rename::schema> create_rename_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rename::name, rename::overload_name)
      .typed<rename::schema>();
}

// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor rename::call(const at::Tensor & self, ::std::optional<at::DimnameList> names) {
    
    static auto op = create_rename_typed_handle();
    return op.call(self, names);
}

// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor rename::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::DimnameList> names) {
    
    static auto op = create_rename_typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<align_to::schema> create_align_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_to::name, align_to::overload_name)
      .typed<align_to::schema>();
}

// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor align_to::call(const at::Tensor & self, at::DimnameList names) {
    
    static auto op = create_align_to_typed_handle();
    return op.call(self, names);
}

// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor align_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) {
    
    static auto op = create_align_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<align_to_ellipsis_idx::schema> create_align_to_ellipsis_idx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_to_ellipsis_idx::name, align_to_ellipsis_idx::overload_name)
      .typed<align_to_ellipsis_idx::schema>();
}

// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor align_to_ellipsis_idx::call(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
    
    static auto op = create_align_to_ellipsis_idx_typed_handle();
    return op.call(self, order, ellipsis_idx);
}

// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor align_to_ellipsis_idx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
    
    static auto op = create_align_to_ellipsis_idx_typed_handle();
    return op.redispatch(dispatchKeySet, self, order, ellipsis_idx);
}

// aten::align_as(Tensor self, Tensor other) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<align_as::schema> create_align_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_as::name, align_as::overload_name)
      .typed<align_as::schema>();
}

// aten::align_as(Tensor self, Tensor other) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor align_as::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_align_as_typed_handle();
    return op.call(self, other);
}

// aten::align_as(Tensor self, Tensor other) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor align_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_align_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<align_tensors::schema> create_align_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(align_tensors::name, align_tensors::overload_name)
      .typed<align_tensors::schema>();
}

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
::std::vector<at::Tensor> align_tensors::call(at::TensorList tensors) {
    
    static auto op = create_align_tensors_typed_handle();
    return op.call(tensors);
}

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
::std::vector<at::Tensor> align_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    
    static auto op = create_align_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// aten::_assert_async(Tensor self) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_assert_async::schema> create__assert_async_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_async::name, _assert_async::overload_name)
      .typed<_assert_async::schema>();
}

// aten::_assert_async(Tensor self) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void _assert_async::call(const at::Tensor & self) {
    
    static auto op = create__assert_async_typed_handle();
    return op.call(self);
}

// aten::_assert_async(Tensor self) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void _assert_async::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__assert_async_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_assert_async.msg(Tensor self, str assert_msg) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_assert_async_msg::schema> create__assert_async_msg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_async_msg::name, _assert_async_msg::overload_name)
      .typed<_assert_async_msg::schema>();
}

// aten::_assert_async.msg(Tensor self, str assert_msg) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void _assert_async_msg::call(const at::Tensor & self, c10::string_view assert_msg) {
    
    static auto op = create__assert_async_msg_typed_handle();
    return op.call(self, assert_msg);
}

// aten::_assert_async.msg(Tensor self, str assert_msg) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void _assert_async_msg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view assert_msg) {
    
    static auto op = create__assert_async_msg_typed_handle();
    return op.redispatch(dispatchKeySet, self, assert_msg);
}

// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_assert_scalar::schema> create__assert_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_scalar::name, _assert_scalar::overload_name)
      .typed<_assert_scalar::schema>();
}

// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void _assert_scalar::call(const at::Scalar & self, c10::string_view assert_msg) {
    
    static auto op = create__assert_scalar_typed_handle();
    return op.call(self, assert_msg);
}

// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void _assert_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, c10::string_view assert_msg) {
    
    static auto op = create__assert_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, assert_msg);
}

// aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_functional_assert_scalar::schema> create__functional_assert_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_functional_assert_scalar::name, _functional_assert_scalar::overload_name)
      .typed<_functional_assert_scalar::schema>();
}

// aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _functional_assert_scalar::call(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
    
    static auto op = create__functional_assert_scalar_typed_handle();
    return op.call(self, assert_msg, dep_token);
}

// aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _functional_assert_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
    
    static auto op = create__functional_assert_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, assert_msg, dep_token);
}

// aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_functional_assert_async_msg::schema> create__functional_assert_async_msg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_functional_assert_async_msg::name, _functional_assert_async_msg::overload_name)
      .typed<_functional_assert_async_msg::schema>();
}

// aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _functional_assert_async_msg::call(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
    
    static auto op = create__functional_assert_async_msg_typed_handle();
    return op.call(self, assert_msg, dep_token);
}

// aten::_functional_assert_async.msg(Tensor self, str assert_msg, Tensor dep_token) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _functional_assert_async_msg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
    
    static auto op = create__functional_assert_async_msg_typed_handle();
    return op.redispatch(dispatchKeySet, self, assert_msg, dep_token);
}

// aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None, *, Device? device=None, Layout? layout=None) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_assert_tensor_metadata::schema> create__assert_tensor_metadata_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_assert_tensor_metadata::name, _assert_tensor_metadata::overload_name)
      .typed<_assert_tensor_metadata::schema>();
}

// aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None, *, Device? device=None, Layout? layout=None) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void _assert_tensor_metadata::call(const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Device> device, ::std::optional<at::Layout> layout) {
    
    static auto op = create__assert_tensor_metadata_typed_handle();
    return op.call(a, size, stride, dtype, device, layout);
}

// aten::_assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None, *, Device? device=None, Layout? layout=None) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void _assert_tensor_metadata::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Device> device, ::std::optional<at::Layout> layout) {
    
    static auto op = create__assert_tensor_metadata_typed_handle();
    return op.redispatch(dispatchKeySet, a, size, stride, dtype, device, layout);
}

// aten::_print(str s) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_print::schema> create__print_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_print::name, _print::overload_name)
      .typed<_print::schema>();
}

// aten::_print(str s) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void _print::call(c10::string_view s) {
    
    static auto op = create__print_typed_handle();
    return op.call(s);
}

// aten::_print(str s) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void _print::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view s) {
    
    static auto op = create__print_typed_handle();
    return op.redispatch(dispatchKeySet, s);
}

// aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sym_constrain_range::schema> create_sym_constrain_range_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sym_constrain_range::name, sym_constrain_range::overload_name)
      .typed<sym_constrain_range::schema>();
}

// aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void sym_constrain_range::call(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max) {
    
    static auto op = create_sym_constrain_range_typed_handle();
    return op.call(size, min, max);
}

// aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void sym_constrain_range::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max) {
    
    static auto op = create_sym_constrain_range_typed_handle();
    return op.redispatch(dispatchKeySet, size, min, max);
}

// aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<sym_constrain_range_for_size::schema> create_sym_constrain_range_for_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sym_constrain_range_for_size::name, sym_constrain_range_for_size::overload_name)
      .typed<sym_constrain_range_for_size::schema>();
}

// aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
void sym_constrain_range_for_size::call(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max) {
    
    static auto op = create_sym_constrain_range_for_size_typed_handle();
    return op.call(size, min, max);
}

// aten::sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
void sym_constrain_range_for_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max) {
    
    static auto op = create_sym_constrain_range_for_size_typed_handle();
    return op.redispatch(dispatchKeySet, size, min, max);
}

// aten::_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_functional_sym_constrain_range::schema> create__functional_sym_constrain_range_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_functional_sym_constrain_range::name, _functional_sym_constrain_range::overload_name)
      .typed<_functional_sym_constrain_range::schema>();
}

// aten::_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _functional_sym_constrain_range::call(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
    
    static auto op = create__functional_sym_constrain_range_typed_handle();
    return op.call(size, min, max, dep_token);
}

// aten::_functional_sym_constrain_range(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _functional_sym_constrain_range::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
    
    static auto op = create__functional_sym_constrain_range_typed_handle();
    return op.redispatch(dispatchKeySet, size, min, max, dep_token);
}

// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_functional_sym_constrain_range_for_size::schema> create__functional_sym_constrain_range_for_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_functional_sym_constrain_range_for_size::name, _functional_sym_constrain_range_for_size::overload_name)
      .typed<_functional_sym_constrain_range_for_size::schema>();
}

// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _functional_sym_constrain_range_for_size::call(const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
    
    static auto op = create__functional_sym_constrain_range_for_size_typed_handle();
    return op.call(size, min, max, dep_token);
}

// aten::_functional_sym_constrain_range_for_size(Scalar size, int? min, int? max, Tensor dep_token) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _functional_sym_constrain_range_for_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, ::std::optional<int64_t> min, ::std::optional<int64_t> max, const at::Tensor & dep_token) {
    
    static auto op = create__functional_sym_constrain_range_for_size_typed_handle();
    return op.redispatch(dispatchKeySet, size, min, max, dep_token);
}

// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_make_dep_token::schema> create__make_dep_token_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dep_token::name, _make_dep_token::overload_name)
      .typed<_make_dep_token::schema>();
}

// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor _make_dep_token::call(::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create__make_dep_token_typed_handle();
    return op.call(dtype, layout, device, pin_memory, memory_format);
}

// aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor _make_dep_token::redispatch(c10::DispatchKeySet dispatchKeySet, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create__make_dep_token_typed_handle();
    return op.redispatch(dispatchKeySet, dtype, layout, device, pin_memory, memory_format);
}

// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<refine_names::schema> create_refine_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(refine_names::name, refine_names::overload_name)
      .typed<refine_names::schema>();
}

// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
at::Tensor refine_names::call(const at::Tensor & self, at::DimnameList names) {
    
    static auto op = create_refine_names_typed_handle();
    return op.call(self, names);
}

// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
at::Tensor refine_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) {
    
    static auto op = create_refine_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, names);
}

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_use_cudnn_ctc_loss::schema> create__use_cudnn_ctc_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_use_cudnn_ctc_loss::name, _use_cudnn_ctc_loss::overload_name)
      .typed<_use_cudnn_ctc_loss::schema>();
}

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
bool _use_cudnn_ctc_loss::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) {
    
    static auto op = create__use_cudnn_ctc_loss_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
bool _use_cudnn_ctc_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) {
    
    static auto op = create__use_cudnn_ctc_loss_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_use_cudnn_ctc_loss_Tensor::schema> create__use_cudnn_ctc_loss_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_use_cudnn_ctc_loss_Tensor::name, _use_cudnn_ctc_loss_Tensor::overload_name)
      .typed<_use_cudnn_ctc_loss_Tensor::schema>();
}

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
bool _use_cudnn_ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) {
    
    static auto op = create__use_cudnn_ctc_loss_Tensor_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
bool _use_cudnn_ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) {
    
    static auto op = create__use_cudnn_ctc_loss_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_ctc_loss::schema> create__cudnn_ctc_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_ctc_loss::name, _cudnn_ctc_loss::overload_name)
      .typed<_cudnn_ctc_loss::schema>();
}

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    
    static auto op = create__cudnn_ctc_loss_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    
    static auto op = create__cudnn_ctc_loss_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
// Resolves this operator's schema from the dispatcher (findSchemaOrThrow
// throws if it is not registered); C10_NOINLINE keeps this cold path out of
// the hot callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_ctc_loss_Tensor::schema> create__cudnn_ctc_loss_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_ctc_loss_Tensor::name, _cudnn_ctc_loss_Tensor::overload_name)
      .typed<_cudnn_ctc_loss_Tensor::schema>();
}

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
// Entry point: the typed handle is created once (function-local static)
// and reused for every subsequent call.
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    
    static auto op = create__cudnn_ctc_loss_Tensor_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
// Redispatch variant: dispatches through the same cached handle with an
// explicit, caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    
    static auto op = create__cudnn_ctc_loss_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_use_cudnn_rnn_flatten_weight() -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_use_cudnn_rnn_flatten_weight::schema> create__use_cudnn_rnn_flatten_weight_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_use_cudnn_rnn_flatten_weight::name, _use_cudnn_rnn_flatten_weight::overload_name)
      .typed<_use_cudnn_rnn_flatten_weight::schema>();
}

// aten::_use_cudnn_rnn_flatten_weight() -> bool
bool _use_cudnn_rnn_flatten_weight::call() {
    // Resolved once, then reused for every invocation.
    static auto handle = create__use_cudnn_rnn_flatten_weight_typed_handle();
    return handle.call();
}

// aten::_use_cudnn_rnn_flatten_weight() -> bool
bool _use_cudnn_rnn_flatten_weight::redispatch(c10::DispatchKeySet dispatchKeySet) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__use_cudnn_rnn_flatten_weight_typed_handle();
    return handle.redispatch(dispatchKeySet);
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_flatten_weight::schema> create__cudnn_rnn_flatten_weight_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cudnn_rnn_flatten_weight::name, _cudnn_rnn_flatten_weight::overload_name)
      .typed<_cudnn_rnn_flatten_weight::schema>();
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
at::Tensor _cudnn_rnn_flatten_weight::call(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_rnn_flatten_weight_typed_handle();
    return handle.call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
at::Tensor _cudnn_rnn_flatten_weight::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_rnn_flatten_weight_typed_handle();
    return handle.redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
}

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn::schema> create__cudnn_rnn_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cudnn_rnn::name, _cudnn_rnn::overload_name)
      .typed<_cudnn_rnn::schema>();
}

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_rnn_typed_handle();
    return handle.call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_rnn_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_backward::schema> create__cudnn_rnn_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cudnn_rnn_backward::name, _cudnn_rnn_backward::overload_name)
      .typed<_cudnn_rnn_backward::schema>();
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_rnn_backward_typed_handle();
    return handle.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_rnn_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_init_dropout_state::schema> create__cudnn_init_dropout_state_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cudnn_init_dropout_state::name, _cudnn_init_dropout_state::overload_name)
      .typed<_cudnn_init_dropout_state::schema>();
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _cudnn_init_dropout_state::call(double dropout, bool train, int64_t dropout_seed, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_init_dropout_state_typed_handle();
    return handle.call(dropout, train, dropout_seed, dtype, layout, device, pin_memory);
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _cudnn_init_dropout_state::redispatch(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__cudnn_init_dropout_state_typed_handle();
    return handle.redispatch(dispatchKeySet, dropout, train, dropout_seed, dtype, layout, device, pin_memory);
}

// aten::_debug_has_internal_overlap(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_debug_has_internal_overlap::schema> create__debug_has_internal_overlap_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_debug_has_internal_overlap::name, _debug_has_internal_overlap::overload_name)
      .typed<_debug_has_internal_overlap::schema>();
}

// aten::_debug_has_internal_overlap(Tensor self) -> int
int64_t _debug_has_internal_overlap::call(const at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__debug_has_internal_overlap_typed_handle();
    return handle.call(self);
}

// aten::_debug_has_internal_overlap(Tensor self) -> int
int64_t _debug_has_internal_overlap::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__debug_has_internal_overlap_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_dropout::schema> create__fused_dropout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fused_dropout::name, _fused_dropout::overload_name)
      .typed<_fused_dropout::schema>();
}

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _fused_dropout::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__fused_dropout_typed_handle();
    return handle.call(self, p, generator);
}

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _fused_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__fused_dropout_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, generator);
}

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_masked_scale::schema> create__masked_scale_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_masked_scale::name, _masked_scale::overload_name)
      .typed<_masked_scale::schema>();
}

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
at::Tensor _masked_scale::call(const at::Tensor & self, const at::Tensor & mask, double scale) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__masked_scale_typed_handle();
    return handle.call(self, mask, scale);
}

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
at::Tensor _masked_scale::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__masked_scale_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, scale);
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout::schema> create_native_dropout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_dropout::name, native_dropout::overload_name)
      .typed<native_dropout::schema>();
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> native_dropout::call(const at::Tensor & input, double p, ::std::optional<bool> train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_native_dropout_typed_handle();
    return handle.call(input, p, train);
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> native_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional<bool> train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_native_dropout_typed_handle();
    return handle.redispatch(dispatchKeySet, input, p, train);
}

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout_backward::schema> create_native_dropout_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_dropout_backward::name, native_dropout_backward::overload_name)
      .typed<native_dropout_backward::schema>();
}

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
at::Tensor native_dropout_backward::call(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_native_dropout_backward_typed_handle();
    return handle.call(grad_output, mask, scale);
}

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
at::Tensor native_dropout_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_native_dropout_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, mask, scale);
}

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_draw::schema> create__sobol_engine_draw_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sobol_engine_draw::name, _sobol_engine_draw::overload_name)
      .typed<_sobol_engine_draw::schema>();
}

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw::call(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional<at::ScalarType> dtype) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_draw_typed_handle();
    return handle.call(quasi, n, sobolstate, dimension, num_generated, dtype);
}

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional<at::ScalarType> dtype) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_draw_typed_handle();
    return handle.redispatch(dispatchKeySet, quasi, n, sobolstate, dimension, num_generated, dtype);
}

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_ff_::schema> create__sobol_engine_ff__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sobol_engine_ff_::name, _sobol_engine_ff_::overload_name)
      .typed<_sobol_engine_ff_::schema>();
}

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
at::Tensor & _sobol_engine_ff_::call(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_ff__typed_handle();
    return handle.call(self, n, sobolstate, dimension, num_generated);
}

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
at::Tensor & _sobol_engine_ff_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_ff__typed_handle();
    return handle.redispatch(dispatchKeySet, self, n, sobolstate, dimension, num_generated);
}

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_scramble_::schema> create__sobol_engine_scramble__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sobol_engine_scramble_::name, _sobol_engine_scramble_::overload_name)
      .typed<_sobol_engine_scramble_::schema>();
}

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_scramble_::call(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_scramble__typed_handle();
    return handle.call(self, ltm, dimension);
}

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_scramble_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_scramble__typed_handle();
    return handle.redispatch(dispatchKeySet, self, ltm, dimension);
}

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sobol_engine_initialize_state_::schema> create__sobol_engine_initialize_state__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sobol_engine_initialize_state_::name, _sobol_engine_initialize_state_::overload_name)
      .typed<_sobol_engine_initialize_state_::schema>();
}

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_initialize_state_::call(at::Tensor & self, int64_t dimension) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_initialize_state__typed_handle();
    return handle.call(self, dimension);
}

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
at::Tensor & _sobol_engine_initialize_state_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dimension) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__sobol_engine_initialize_state__typed_handle();
    return handle.redispatch(dispatchKeySet, self, dimension);
}

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_from_tensor::schema> create__reshape_from_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_reshape_from_tensor::name, _reshape_from_tensor::overload_name)
      .typed<_reshape_from_tensor::schema>();
}

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
at::Tensor _reshape_from_tensor::call(const at::Tensor & self, const at::Tensor & shape) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__reshape_from_tensor_typed_handle();
    return handle.call(self, shape);
}

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
at::Tensor _reshape_from_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & shape) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__reshape_from_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, shape);
}

// aten::_shape_as_tensor(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_shape_as_tensor::schema> create__shape_as_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_shape_as_tensor::name, _shape_as_tensor::overload_name)
      .typed<_shape_as_tensor::schema>();
}

// aten::_shape_as_tensor(Tensor self) -> Tensor
at::Tensor _shape_as_tensor::call(const at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__shape_as_tensor_typed_handle();
    return handle.call(self);
}

// aten::_shape_as_tensor(Tensor self) -> Tensor
at::Tensor _shape_as_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create__shape_as_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dropout::schema> create_dropout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(dropout::name, dropout::overload_name)
      .typed<dropout::schema>();
}

// aten::dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor dropout::call(const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_dropout_typed_handle();
    return handle.call(input, p, train);
}

// aten::dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_dropout_typed_handle();
    return handle.redispatch(dispatchKeySet, input, p, train);
}

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dropout_::schema> create_dropout__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(dropout_::name, dropout_::overload_name)
      .typed<dropout_::schema>();
}

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & dropout_::call(at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_dropout__typed_handle();
    return handle.call(self, p, train);
}

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_dropout__typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, train);
}

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<feature_dropout::schema> create_feature_dropout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(feature_dropout::name, feature_dropout::overload_name)
      .typed<feature_dropout::schema>();
}

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_dropout::call(const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_dropout_typed_handle();
    return handle.call(input, p, train);
}

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_dropout_typed_handle();
    return handle.redispatch(dispatchKeySet, input, p, train);
}

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<feature_dropout_::schema> create_feature_dropout__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(feature_dropout_::name, feature_dropout_::overload_name)
      .typed<feature_dropout_::schema>();
}

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_dropout_::call(at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_dropout__typed_handle();
    return handle.call(self, p, train);
}

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_dropout__typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, train);
}

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<alpha_dropout::schema> create_alpha_dropout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(alpha_dropout::name, alpha_dropout::overload_name)
      .typed<alpha_dropout::schema>();
}

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor alpha_dropout::call(const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_alpha_dropout_typed_handle();
    return handle.call(input, p, train);
}

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor alpha_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_alpha_dropout_typed_handle();
    return handle.redispatch(dispatchKeySet, input, p, train);
}

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<alpha_dropout_::schema> create_alpha_dropout__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(alpha_dropout_::name, alpha_dropout_::overload_name)
      .typed<alpha_dropout_::schema>();
}

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & alpha_dropout_::call(at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_alpha_dropout__typed_handle();
    return handle.call(self, p, train);
}

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & alpha_dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_alpha_dropout__typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, train);
}

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<feature_alpha_dropout::schema> create_feature_alpha_dropout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(feature_alpha_dropout::name, feature_alpha_dropout::overload_name)
      .typed<feature_alpha_dropout::schema>();
}

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_alpha_dropout::call(const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_alpha_dropout_typed_handle();
    return handle.call(input, p, train);
}

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
at::Tensor feature_alpha_dropout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_alpha_dropout_typed_handle();
    return handle.redispatch(dispatchKeySet, input, p, train);
}

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<feature_alpha_dropout_::schema> create_feature_alpha_dropout__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(feature_alpha_dropout_::name, feature_alpha_dropout_::overload_name)
      .typed<feature_alpha_dropout_::schema>();
}

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_alpha_dropout_::call(at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_alpha_dropout__typed_handle();
    return handle.call(self, p, train);
}

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
at::Tensor & feature_alpha_dropout_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_feature_alpha_dropout__typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, train);
}

// aten::abs(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<abs::schema> create_abs_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(abs::name, abs::overload_name)
      .typed<abs::schema>();
}

// aten::abs(Tensor self) -> Tensor
at::Tensor abs::call(const at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_abs_typed_handle();
    return handle.call(self);
}

// aten::abs(Tensor self) -> Tensor
at::Tensor abs::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_abs_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<abs_::schema> create_abs__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(abs_::name, abs_::overload_name)
      .typed<abs_::schema>();
}

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & abs_::call(at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_abs__typed_handle();
    return handle.call(self);
}

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & abs_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Resolved once, then reused for every invocation.
    static auto handle = create_abs__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<abs_out::schema> create_abs_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(abs_out::name, abs_out::overload_name)
      .typed<abs_out::schema>();
}

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & abs_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_abs_out_typed_handle();
    return op.call(self, out);
}

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & abs_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_abs_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::absolute(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<absolute::schema> create_absolute_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(absolute::name, absolute::overload_name)
      .typed<absolute::schema>();
}

// aten::absolute(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor absolute::call(const at::Tensor & self) {
    
    static auto op = create_absolute_typed_handle();
    return op.call(self);
}

// aten::absolute(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor absolute::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_absolute_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<absolute_::schema> create_absolute__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(absolute_::name, absolute_::overload_name)
      .typed<absolute_::schema>();
}

// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & absolute_::call(at::Tensor & self) {
    
    static auto op = create_absolute__typed_handle();
    return op.call(self);
}

// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & absolute_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_absolute__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<absolute_out::schema> create_absolute_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(absolute_out::name, absolute_out::overload_name)
      .typed<absolute_out::schema>();
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & absolute_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_absolute_out_typed_handle();
    return op.call(self, out);
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & absolute_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_absolute_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::angle(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<angle::schema> create_angle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(angle::name, angle::overload_name)
      .typed<angle::schema>();
}

// aten::angle(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor angle::call(const at::Tensor & self) {
    
    static auto op = create_angle_typed_handle();
    return op.call(self);
}

// aten::angle(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor angle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_angle_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<angle_out::schema> create_angle_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(angle_out::name, angle_out::overload_name)
      .typed<angle_out::schema>();
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & angle_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_angle_out_typed_handle();
    return op.call(self, out);
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & angle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_angle_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<view_as_real::schema> create_view_as_real_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_real::name, view_as_real::overload_name)
      .typed<view_as_real::schema>();
}

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor view_as_real::call(const at::Tensor & self) {
    
    static auto op = create_view_as_real_typed_handle();
    return op.call(self);
}

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor view_as_real::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_view_as_real_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<view_as_complex::schema> create_view_as_complex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_complex::name, view_as_complex::overload_name)
      .typed<view_as_complex::schema>();
}

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor view_as_complex::call(const at::Tensor & self) {
    
    static auto op = create_view_as_complex_typed_handle();
    return op.call(self);
}

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor view_as_complex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_view_as_complex_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sgn(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<sgn::schema> create_sgn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sgn::name, sgn::overload_name)
      .typed<sgn::schema>();
}

// aten::sgn(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor sgn::call(const at::Tensor & self) {
    
    static auto op = create_sgn_typed_handle();
    return op.call(self);
}

// aten::sgn(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor sgn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sgn_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<sgn_::schema> create_sgn__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sgn_::name, sgn_::overload_name)
      .typed<sgn_::schema>();
}

// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & sgn_::call(at::Tensor & self) {
    
    static auto op = create_sgn__typed_handle();
    return op.call(self);
}

// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & sgn_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_sgn__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<sgn_out::schema> create_sgn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sgn_out::name, sgn_out::overload_name)
      .typed<sgn_out::schema>();
}

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & sgn_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sgn_out_typed_handle();
    return op.call(self, out);
}

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & sgn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sgn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<chalf::schema> create_chalf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(chalf::name, chalf::overload_name)
      .typed<chalf::schema>();
}

// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor chalf::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_chalf_typed_handle();
    return op.call(self, memory_format);
}

// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor chalf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_chalf_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format);
}

// aten::real(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<real::schema> create_real_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(real::name, real::overload_name)
      .typed<real::schema>();
}

// aten::real(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor real::call(const at::Tensor & self) {
    
    static auto op = create_real_typed_handle();
    return op.call(self);
}

// aten::real(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor real::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_real_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::imag(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<imag::schema> create_imag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(imag::name, imag::overload_name)
      .typed<imag::schema>();
}

// aten::imag(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor imag::call(const at::Tensor & self) {
    
    static auto op = create_imag_typed_handle();
    return op.call(self);
}

// aten::imag(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor imag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_imag_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_conj(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_conj::schema> create__conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj::name, _conj::overload_name)
      .typed<_conj::schema>();
}

// aten::_conj(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor _conj::call(const at::Tensor & self) {
    
    static auto op = create__conj_typed_handle();
    return op.call(self);
}

// aten::_conj(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::conj(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<conj::schema> create_conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj::name, conj::overload_name)
      .typed<conj::schema>();
}

// aten::conj(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor conj::call(const at::Tensor & self) {
    
    static auto op = create_conj_typed_handle();
    return op.call(self);
}

// aten::conj(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_conj_physical(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_conj_physical::schema> create__conj_physical_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_physical::name, _conj_physical::overload_name)
      .typed<_conj_physical::schema>();
}

// aten::_conj_physical(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor _conj_physical::call(const at::Tensor & self) {
    
    static auto op = create__conj_physical_typed_handle();
    return op.call(self);
}

// aten::_conj_physical(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _conj_physical::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__conj_physical_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::conj_physical(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<conj_physical::schema> create_conj_physical_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj_physical::name, conj_physical::overload_name)
      .typed<conj_physical::schema>();
}

// aten::conj_physical(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor conj_physical::call(const at::Tensor & self) {
    
    static auto op = create_conj_physical_typed_handle();
    return op.call(self);
}

// aten::conj_physical(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor conj_physical::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_conj_physical_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<conj_physical_out::schema> create_conj_physical_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj_physical_out::name, conj_physical_out::overload_name)
      .typed<conj_physical_out::schema>();
}

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & conj_physical_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_conj_physical_out_typed_handle();
    return op.call(self, out);
}

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & conj_physical_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_conj_physical_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<conj_physical_::schema> create_conj_physical__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conj_physical_::name, conj_physical_::overload_name)
      .typed<conj_physical_::schema>();
}

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & conj_physical_::call(at::Tensor & self) {
    
    static auto op = create_conj_physical__typed_handle();
    return op.call(self);
}

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & conj_physical_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_conj_physical__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<resolve_conj::schema> create_resolve_conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resolve_conj::name, resolve_conj::overload_name)
      .typed<resolve_conj::schema>();
}

// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor resolve_conj::call(const at::Tensor & self) {
    
    static auto op = create_resolve_conj_typed_handle();
    return op.call(self);
}

// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor resolve_conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_resolve_conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<resolve_neg::schema> create_resolve_neg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resolve_neg::name, resolve_neg::overload_name)
      .typed<resolve_neg::schema>();
}

// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor resolve_neg::call(const at::Tensor & self) {
    
    static auto op = create_resolve_neg_typed_handle();
    return op.call(self);
}

// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor resolve_neg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_resolve_neg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_neg_view::schema> create__neg_view_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_neg_view::name, _neg_view::overload_name)
      .typed<_neg_view::schema>();
}

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor _neg_view::call(const at::Tensor & self) {
    
    static auto op = create__neg_view_typed_handle();
    return op.call(self);
}

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _neg_view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__neg_view_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::acos(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<acos::schema> create_acos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acos::name, acos::overload_name)
      .typed<acos::schema>();
}

// aten::acos(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor acos::call(const at::Tensor & self) {
    
    static auto op = create_acos_typed_handle();
    return op.call(self);
}

// aten::acos(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor acos::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_acos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<acos_::schema> create_acos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acos_::name, acos_::overload_name)
      .typed<acos_::schema>();
}

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & acos_::call(at::Tensor & self) {
    
    static auto op = create_acos__typed_handle();
    return op.call(self);
}

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & acos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_acos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<acos_out::schema> create_acos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(acos_out::name, acos_out::overload_name)
      .typed<acos_out::schema>();
}

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & acos_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_acos_out_typed_handle();
    return op.call(self, out);
}

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & acos_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_acos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::arccos(Tensor self) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<arccos::schema> create_arccos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arccos::name, arccos::overload_name)
      .typed<arccos::schema>();
}

// aten::arccos(Tensor self) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor arccos::call(const at::Tensor & self) {
    
    static auto op = create_arccos_typed_handle();
    return op.call(self);
}

// aten::arccos(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor arccos::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_arccos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<arccos_::schema> create_arccos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arccos_::name, arccos_::overload_name)
      .typed<arccos_::schema>();
}

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & arccos_::call(at::Tensor & self) {
    
    static auto op = create_arccos__typed_handle();
    return op.call(self);
}

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & arccos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_arccos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<arccos_out::schema> create_arccos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arccos_out::name, arccos_out::overload_name)
      .typed<arccos_out::schema>();
}

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & arccos_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_arccos_out_typed_handle();
    return op.call(self, out);
}

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & arccos_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_arccos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool1d::schema> create_avg_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool1d::name, avg_pool1d::overload_name)
      .typed<avg_pool1d::schema>();
}

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor avg_pool1d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
    
    static auto op = create_avg_pool1d_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor avg_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
    
    static auto op = create_avg_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool1d::schema> create_adaptive_avg_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool1d::name, adaptive_avg_pool1d::overload_name)
      .typed<adaptive_avg_pool1d::schema>();
}

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor adaptive_avg_pool1d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool1d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor adaptive_avg_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool1d::schema> create_adaptive_max_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool1d::name, adaptive_max_pool1d::overload_name)
      .typed<adaptive_max_pool1d::schema>();
}

// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
// Hot path: the handle is created on first use (thread-safe local static).
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool1d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<add_Tensor::schema> create_add_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add_Tensor::name, add_Tensor::overload_name)
      .typed<add_Tensor::schema>();
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor add_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_add_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor add_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_add_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<add__Tensor::schema> create_add__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add__Tensor::name, add__Tensor::overload_name)
      .typed<add__Tensor::schema>();
}

// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & add__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_add__Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & add__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_add__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<add_out::schema> create_add_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add_out::name, add_out::overload_name)
      .typed<add_out::schema>();
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & add_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_add_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & add_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_add_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_Tensor::schema> create__add_relu_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_Tensor::name, _add_relu_Tensor::overload_name)
      .typed<_add_relu_Tensor::schema>();
}

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor _add_relu_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _add_relu_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu__Tensor::schema> create__add_relu__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu__Tensor::name, _add_relu__Tensor::overload_name)
      .typed<_add_relu__Tensor::schema>();
}

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & _add_relu__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu__Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & _add_relu__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_out::schema> create__add_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_out::name, _add_relu_out::overload_name)
      .typed<_add_relu_out::schema>();
}

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor & _add_relu_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create__add_relu_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & _add_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create__add_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
// Cold path: resolves the typed dispatcher handle for this schema once;
// findSchemaOrThrow throws if the operator is not registered. C10_NOINLINE
// keeps the lookup out of the fast paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_Scalar::schema> create__add_relu_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_Scalar::name, _add_relu_Scalar::overload_name)
      .typed<_add_relu_Scalar::schema>();
}

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
// Hot path: the handle is created on first use (thread-safe local static).
at::Tensor _add_relu_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu_Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _add_relu_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu__Scalar::schema> create__add_relu__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu__Scalar::name, _add_relu__Scalar::overload_name)
      .typed<_add_relu__Scalar::schema>();
}

// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & _add_relu__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu__Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & _add_relu__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create__add_relu__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<add_Scalar::schema> create_add_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add_Scalar::name, add_Scalar::overload_name)
      .typed<add_Scalar::schema>();
}

// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor add_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_add_Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor add_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_add_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<add__Scalar::schema> create_add__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add__Scalar::name, add__Scalar::overload_name)
      .typed<add__Scalar::schema>();
}

// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & add__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_add__Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & add__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_add__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addmv::schema> create_addmv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmv::name, addmv::overload_name)
      .typed<addmv::schema>();
}

// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addmv::call(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmv_typed_handle();
    return op.call(self, mat, vec, beta, alpha);
}

// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addmv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmv_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat, vec, beta, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addmv_::schema> create_addmv__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmv_::name, addmv_::overload_name)
      .typed<addmv_::schema>();
}

// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addmv_::call(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmv__typed_handle();
    return op.call(self, mat, vec, beta, alpha);
}

// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addmv_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmv__typed_handle();
    return op.redispatch(dispatchKeySet, self, mat, vec, beta, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addmv_out::schema> create_addmv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmv_out::name, addmv_out::overload_name)
      .typed<addmv_out::schema>();
}

// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addmv_out::call(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addmv_out_typed_handle();
    return op.call(self, mat, vec, beta, alpha, out);
}

// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addmv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addmv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat, vec, beta, alpha, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addr::schema> create_addr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addr::name, addr::overload_name)
      .typed<addr::schema>();
}

// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addr::call(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addr_typed_handle();
    return op.call(self, vec1, vec2, beta, alpha);
}

// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addr_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addr_::schema> create_addr__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addr_::name, addr_::overload_name)
      .typed<addr_::schema>();
}

// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addr_::call(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addr__typed_handle();
    return op.call(self, vec1, vec2, beta, alpha);
}

// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addr_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addr__typed_handle();
    return op.redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addr_out::schema> create_addr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addr_out::name, addr_out::overload_name)
      .typed<addr_out::schema>();
}

// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addr_out::call(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addr_out_typed_handle();
    return op.call(self, vec1, vec2, beta, alpha, out);
}

// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<affine_grid_generator::schema> create_affine_grid_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(affine_grid_generator::name, affine_grid_generator::overload_name)
      .typed<affine_grid_generator::schema>();
}

// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator::call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) {
    
    static auto op = create_affine_grid_generator_typed_handle();
    return op.call(theta, size, align_corners);
}

// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) {
    
    static auto op = create_affine_grid_generator_typed_handle();
    return op.redispatch(dispatchKeySet, theta, size, align_corners);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<affine_grid_generator_backward::schema> create_affine_grid_generator_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(affine_grid_generator_backward::name, affine_grid_generator_backward::overload_name)
      .typed<affine_grid_generator_backward::schema>();
}

// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator_backward::call(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) {
    
    static auto op = create_affine_grid_generator_backward_typed_handle();
    return op.call(grad, size, align_corners);
}

// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
at::Tensor affine_grid_generator_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) {
    
    static auto op = create_affine_grid_generator_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, size, align_corners);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::_is_all_true(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_is_all_true::schema> create__is_all_true_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_is_all_true::name, _is_all_true::overload_name)
      .typed<_is_all_true::schema>();
}

// aten::_is_all_true(Tensor self) -> Tensor
at::Tensor _is_all_true::call(const at::Tensor & self) {
    
    static auto op = create__is_all_true_typed_handle();
    return op.call(self);
}

// aten::_is_all_true(Tensor self) -> Tensor
at::Tensor _is_all_true::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__is_all_true_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::_is_any_true(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_is_any_true::schema> create__is_any_true_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_is_any_true::name, _is_any_true::overload_name)
      .typed<_is_any_true::schema>();
}

// aten::_is_any_true(Tensor self) -> Tensor
at::Tensor _is_any_true::call(const at::Tensor & self) {
    
    static auto op = create__is_any_true_typed_handle();
    return op.call(self);
}

// aten::_is_any_true(Tensor self) -> Tensor
at::Tensor _is_any_true::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__is_any_true_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::_test_check_tensor(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_check_tensor::schema> create__test_check_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_check_tensor::name, _test_check_tensor::overload_name)
      .typed<_test_check_tensor::schema>();
}

// aten::_test_check_tensor(Tensor self) -> Tensor
at::Tensor _test_check_tensor::call(const at::Tensor & self) {
    
    static auto op = create__test_check_tensor_typed_handle();
    return op.call(self);
}

// aten::_test_check_tensor(Tensor self) -> Tensor
at::Tensor _test_check_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__test_check_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_test_functorch_fallback::schema> create__test_functorch_fallback_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_functorch_fallback::name, _test_functorch_fallback::overload_name)
      .typed<_test_functorch_fallback::schema>();
}

// aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor
at::Tensor _test_functorch_fallback::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create__test_functorch_fallback_typed_handle();
    return op.call(self, other);
}

// aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor
at::Tensor _test_functorch_fallback::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create__test_functorch_fallback_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<all_dim::schema> create_all_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_dim::name, all_dim::overload_name)
      .typed<all_dim::schema>();
}

// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor all_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    static auto op = create_all_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor all_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    static auto op = create_all_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<all_dims::schema> create_all_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_dims::name, all_dims::overload_name)
      .typed<all_dims::schema>();
}

// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
at::Tensor all_dims::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    
    static auto op = create_all_dims_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
at::Tensor all_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    
    static auto op = create_all_dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<all_out::schema> create_all_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_out::name, all_out::overload_name)
      .typed<all_out::schema>();
}

// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_out::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_all_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_all_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<all_dims_out::schema> create_all_dims_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_dims_out::name, all_dims_out::overload_name)
      .typed<all_dims_out::schema>();
}

// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dims_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_all_dims_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_all_dims_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<all_dimname::schema> create_all_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_dimname::name, all_dimname::overload_name)
      .typed<all_dimname::schema>();
}

// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor all_dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    static auto op = create_all_dimname_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor all_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    static auto op = create_all_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<all_dimname_out::schema> create_all_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_dimname_out::name, all_dimname_out::overload_name)
      .typed<all_dimname_out::schema>();
}

// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_all_dimname_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & all_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_all_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// Note: unlike most ops in this file, allclose returns a plain bool, not a Tensor.
// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<allclose::schema> create_allclose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(allclose::name, allclose::overload_name)
      .typed<allclose::schema>();
}

// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
bool allclose::call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    
    static auto op = create_allclose_typed_handle();
    return op.call(self, other, rtol, atol, equal_nan);
}

// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
bool allclose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    
    static auto op = create_allclose_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<any_dim::schema> create_any_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dim::name, any_dim::overload_name)
      .typed<any_dim::schema>();
}

// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor any_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    static auto op = create_any_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
at::Tensor any_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    static auto op = create_any_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<any_dims::schema> create_any_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dims::name, any_dims::overload_name)
      .typed<any_dims::schema>();
}

// aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
at::Tensor any_dims::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    
    static auto op = create_any_dims_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor
at::Tensor any_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    
    static auto op = create_any_dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<any_out::schema> create_any_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_out::name, any_out::overload_name)
      .typed<any_out::schema>();
}

// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_out::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_any_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_any_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<any_dims_out::schema> create_any_dims_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dims_out::name, any_dims_out::overload_name)
      .typed<any_dims_out::schema>();
}

// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_dims_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_any_dims_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_any_dims_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<any_dimname::schema> create_any_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dimname::name, any_dimname::overload_name)
      .typed<any_dimname::schema>();
}

// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor any_dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    static auto op = create_any_dimname_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
at::Tensor any_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    static auto op = create_any_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<any_dimname_out::schema> create_any_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_dimname_out::name, any_dimname_out::overload_name)
      .typed<any_dimname_out::schema>();
}

// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_any_dimname_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & any_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    
    static auto op = create_any_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// The four optional TensorOptions fields are passed through individually.
// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arange::schema> create_arange_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange::name, arange::overload_name)
      .typed<arange::schema>();
}

// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange::call(const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_arange_typed_handle();
    return op.call(end, dtype, layout, device, pin_memory);
}

// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_arange_typed_handle();
    return op.redispatch(dispatchKeySet, end, dtype, layout, device, pin_memory);
}

// @generated boilerplate — change via torchgen, not by hand. The creator
// resolves the typed operator handle from the Dispatcher singleton (throws if
// the schema is unregistered); call()/redispatch() cache it in a local static.
// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arange_start::schema> create_arange_start_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arange_start::name, arange_start::overload_name)
      .typed<arange_start::schema>();
}

// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start::call(const at::Scalar & start, const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_arange_start_typed_handle();
    return op.call(start, end, dtype, layout, device, pin_memory);
}

// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_arange_start_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory);
}

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arange_start_step::schema> create_arange_start_step_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arange_start_step::name, arange_start_step::overload_name).typed<arange_start_step::schema>();
}

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start_step::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arange_start_step_typed_handle();
    return handle.call(start, end, step, dtype, layout, device, pin_memory);
}

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor arange_start_step::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arange_start_step_typed_handle();
    return handle.redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory);
}

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arange_out::schema> create_arange_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arange_out::name, arange_out::overload_name).typed<arange_out::schema>();
}

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_out::call(const at::Scalar & end, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arange_out_typed_handle();
    return handle.call(end, out);
}

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arange_out_typed_handle();
    return handle.redispatch(dispatchKeySet, end, out);
}

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arange_start_out::schema> create_arange_start_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arange_start_out::name, arange_start_out::overload_name).typed<arange_start_out::schema>();
}

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_start_out::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arange_start_out_typed_handle();
    return handle.call(start, end, step, out);
}

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arange_start_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arange_start_out_typed_handle();
    return handle.redispatch(dispatchKeySet, start, end, step, out);
}

// aten::_dim_arange(Tensor like, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_dim_arange::schema> create__dim_arange_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_dim_arange::name, _dim_arange::overload_name).typed<_dim_arange::schema>();
}

// aten::_dim_arange(Tensor like, int dim) -> Tensor
at::Tensor _dim_arange::call(const at::Tensor & like, int64_t dim) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create__dim_arange_typed_handle();
    return handle.call(like, dim);
}

// aten::_dim_arange(Tensor like, int dim) -> Tensor
at::Tensor _dim_arange::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & like, int64_t dim) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create__dim_arange_typed_handle();
    return handle.redispatch(dispatchKeySet, like, dim);
}

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argmax::schema> create_argmax_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(argmax::name, argmax::overload_name).typed<argmax::schema>();
}

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmax::call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_argmax_typed_handle();
    return handle.call(self, dim, keepdim);
}

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_argmax_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<argmax_out::schema> create_argmax_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(argmax_out::name, argmax_out::overload_name).typed<argmax_out::schema>();
}

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmax_out::call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_argmax_out_typed_handle();
    return handle.call(self, dim, keepdim, out);
}

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_argmax_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<argmin::schema> create_argmin_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(argmin::name, argmin::overload_name).typed<argmin::schema>();
}

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmin::call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_argmin_typed_handle();
    return handle.call(self, dim, keepdim);
}

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
at::Tensor argmin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_argmin_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<argmin_out::schema> create_argmin_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(argmin_out::name, argmin_out::overload_name).typed<argmin_out::schema>();
}

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmin_out::call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_argmin_out_typed_handle();
    return handle.call(self, dim, keepdim, out);
}

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & argmin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_argmin_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::acosh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<acosh::schema> create_acosh_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(acosh::name, acosh::overload_name).typed<acosh::schema>();
}

// aten::acosh(Tensor self) -> Tensor
at::Tensor acosh::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_acosh_typed_handle();
    return handle.call(self);
}

// aten::acosh(Tensor self) -> Tensor
at::Tensor acosh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_acosh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<acosh_::schema> create_acosh__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(acosh_::name, acosh_::overload_name).typed<acosh_::schema>();
}

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & acosh_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_acosh__typed_handle();
    return handle.call(self);
}

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & acosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_acosh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<acosh_out::schema> create_acosh_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(acosh_out::name, acosh_out::overload_name).typed<acosh_out::schema>();
}

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & acosh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_acosh_out_typed_handle();
    return handle.call(self, out);
}

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & acosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_acosh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::arccosh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arccosh::schema> create_arccosh_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arccosh::name, arccosh::overload_name).typed<arccosh::schema>();
}

// aten::arccosh(Tensor self) -> Tensor
at::Tensor arccosh::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arccosh_typed_handle();
    return handle.call(self);
}

// aten::arccosh(Tensor self) -> Tensor
at::Tensor arccosh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arccosh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arccosh_::schema> create_arccosh__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arccosh_::name, arccosh_::overload_name).typed<arccosh_::schema>();
}

// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arccosh_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arccosh__typed_handle();
    return handle.call(self);
}

// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arccosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arccosh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arccosh_out::schema> create_arccosh_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arccosh_out::name, arccosh_out::overload_name).typed<arccosh_out::schema>();
}

// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arccosh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arccosh_out_typed_handle();
    return handle.call(self, out);
}

// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arccosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arccosh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::asinh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<asinh::schema> create_asinh_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(asinh::name, asinh::overload_name).typed<asinh::schema>();
}

// aten::asinh(Tensor self) -> Tensor
at::Tensor asinh::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_asinh_typed_handle();
    return handle.call(self);
}

// aten::asinh(Tensor self) -> Tensor
at::Tensor asinh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_asinh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<asinh_::schema> create_asinh__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(asinh_::name, asinh_::overload_name).typed<asinh_::schema>();
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & asinh_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_asinh__typed_handle();
    return handle.call(self);
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & asinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_asinh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<asinh_out::schema> create_asinh_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(asinh_out::name, asinh_out::overload_name).typed<asinh_out::schema>();
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & asinh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_asinh_out_typed_handle();
    return handle.call(self, out);
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & asinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_asinh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::arcsinh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arcsinh::schema> create_arcsinh_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arcsinh::name, arcsinh::overload_name).typed<arcsinh::schema>();
}

// aten::arcsinh(Tensor self) -> Tensor
at::Tensor arcsinh::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arcsinh_typed_handle();
    return handle.call(self);
}

// aten::arcsinh(Tensor self) -> Tensor
at::Tensor arcsinh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arcsinh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsinh_::schema> create_arcsinh__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arcsinh_::name, arcsinh_::overload_name).typed<arcsinh_::schema>();
}

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsinh_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arcsinh__typed_handle();
    return handle.call(self);
}

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arcsinh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsinh_out::schema> create_arcsinh_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arcsinh_out::name, arcsinh_out::overload_name).typed<arcsinh_out::schema>();
}

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsinh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arcsinh_out_typed_handle();
    return handle.call(self, out);
}

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arcsinh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::atanh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atanh::schema> create_atanh_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atanh::name, atanh::overload_name).typed<atanh::schema>();
}

// aten::atanh(Tensor self) -> Tensor
at::Tensor atanh::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_atanh_typed_handle();
    return handle.call(self);
}

// aten::atanh(Tensor self) -> Tensor
at::Tensor atanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_atanh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atanh_::schema> create_atanh__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atanh_::name, atanh_::overload_name).typed<atanh_::schema>();
}

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atanh_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_atanh__typed_handle();
    return handle.call(self);
}

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_atanh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atanh_out::schema> create_atanh_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atanh_out::name, atanh_out::overload_name).typed<atanh_out::schema>();
}

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atanh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_atanh_out_typed_handle();
    return handle.call(self, out);
}

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_atanh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::arctanh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arctanh::schema> create_arctanh_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arctanh::name, arctanh::overload_name).typed<arctanh::schema>();
}

// aten::arctanh(Tensor self) -> Tensor
at::Tensor arctanh::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arctanh_typed_handle();
    return handle.call(self);
}

// aten::arctanh(Tensor self) -> Tensor
at::Tensor arctanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arctanh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctanh_::schema> create_arctanh__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arctanh_::name, arctanh_::overload_name).typed<arctanh_::schema>();
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctanh_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arctanh__typed_handle();
    return handle.call(self);
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arctanh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctanh_out::schema> create_arctanh_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arctanh_out::name, arctanh_out::overload_name).typed<arctanh_out::schema>();
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctanh_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arctanh_out_typed_handle();
    return handle.call(self, out);
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arctanh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided::schema> create_as_strided_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(as_strided::name, as_strided::overload_name).typed<as_strided::schema>();
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
at::Tensor as_strided::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_as_strided_typed_handle();
    return handle.call(self, size, stride, storage_offset);
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
at::Tensor as_strided::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_as_strided_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, stride, storage_offset);
}

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_::schema> create_as_strided__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(as_strided_::name, as_strided_::overload_name).typed<as_strided_::schema>();
}

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
const at::Tensor & as_strided_::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_as_strided__typed_handle();
    return handle.call(self, size, stride, storage_offset);
}

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
const at::Tensor & as_strided_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_as_strided__typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, stride, storage_offset);
}

// aten::asin(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<asin::schema> create_asin_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(asin::name, asin::overload_name).typed<asin::schema>();
}

// aten::asin(Tensor self) -> Tensor
at::Tensor asin::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_asin_typed_handle();
    return handle.call(self);
}

// aten::asin(Tensor self) -> Tensor
at::Tensor asin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_asin_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::asin_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<asin_::schema> create_asin__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(asin_::name, asin_::overload_name).typed<asin_::schema>();
}

// aten::asin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & asin_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_asin__typed_handle();
    return handle.call(self);
}

// aten::asin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & asin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_asin__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<asin_out::schema> create_asin_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(asin_out::name, asin_out::overload_name).typed<asin_out::schema>();
}

// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & asin_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_asin_out_typed_handle();
    return handle.call(self, out);
}

// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & asin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_asin_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::arcsin(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arcsin::schema> create_arcsin_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arcsin::name, arcsin::overload_name).typed<arcsin::schema>();
}

// aten::arcsin(Tensor self) -> Tensor
at::Tensor arcsin::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arcsin_typed_handle();
    return handle.call(self);
}

// aten::arcsin(Tensor self) -> Tensor
at::Tensor arcsin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arcsin_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsin_::schema> create_arcsin__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arcsin_::name, arcsin_::overload_name).typed<arcsin_::schema>();
}

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsin_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arcsin__typed_handle();
    return handle.call(self);
}

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arcsin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arcsin__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arcsin_out::schema> create_arcsin_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arcsin_out::name, arcsin_out::overload_name).typed<arcsin_out::schema>();
}

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsin_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arcsin_out_typed_handle();
    return handle.call(self, out);
}

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arcsin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arcsin_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::atan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atan::schema> create_atan_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atan::name, atan::overload_name).typed<atan::schema>();
}

// aten::atan(Tensor self) -> Tensor
at::Tensor atan::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_atan_typed_handle();
    return handle.call(self);
}

// aten::atan(Tensor self) -> Tensor
at::Tensor atan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_atan_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atan_::schema> create_atan__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atan_::name, atan_::overload_name).typed<atan_::schema>();
}

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atan_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_atan__typed_handle();
    return handle.call(self);
}

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & atan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_atan__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atan_out::schema> create_atan_out_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atan_out::name, atan_out::overload_name).typed<atan_out::schema>();
}

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_atan_out_typed_handle();
    return handle.call(self, out);
}

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_atan_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::arctan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arctan::schema> create_arctan_typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arctan::name, arctan::overload_name).typed<arctan::schema>();
}

// aten::arctan(Tensor self) -> Tensor
at::Tensor arctan::call(const at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arctan_typed_handle();
    return handle.call(self);
}

// aten::arctan(Tensor self) -> Tensor
at::Tensor arctan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arctan_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan_::schema> create_arctan__typed_handle() {
  // Look the schema up in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arctan_::name, arctan_::overload_name).typed<arctan_::schema>();
}

// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctan_::call(at::Tensor & self) {
    // Handle is created once (thread-safe static init) and reused across calls.
    static auto handle = create_arctan__typed_handle();
    return handle.call(self);
}

// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & arctan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Redispatch with the caller-supplied dispatch key set.
    static auto handle = create_arctan__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan_out::schema> create_arctan_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(arctan_out::name, arctan_out::overload_name).typed<arctan_out::schema>();
}

// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_arctan_out_typed_handle();
    return handle.call(self, out);
}

// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_arctan_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::atleast_1d(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atleast_1d::schema> create_atleast_1d_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atleast_1d::name, atleast_1d::overload_name).typed<atleast_1d::schema>();
}

// aten::atleast_1d(Tensor self) -> Tensor
at::Tensor atleast_1d::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_atleast_1d_typed_handle();
    return handle.call(self);
}

// aten::atleast_1d(Tensor self) -> Tensor
at::Tensor atleast_1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_atleast_1d_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<atleast_1d_Sequence::schema> create_atleast_1d_Sequence_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atleast_1d_Sequence::name, atleast_1d_Sequence::overload_name).typed<atleast_1d_Sequence::schema>();
}

// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_1d_Sequence::call(at::TensorList tensors) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_atleast_1d_Sequence_typed_handle();
    return handle.call(tensors);
}

// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_1d_Sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_atleast_1d_Sequence_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::atleast_2d(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atleast_2d::schema> create_atleast_2d_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atleast_2d::name, atleast_2d::overload_name).typed<atleast_2d::schema>();
}

// aten::atleast_2d(Tensor self) -> Tensor
at::Tensor atleast_2d::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_atleast_2d_typed_handle();
    return handle.call(self);
}

// aten::atleast_2d(Tensor self) -> Tensor
at::Tensor atleast_2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_atleast_2d_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<atleast_2d_Sequence::schema> create_atleast_2d_Sequence_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atleast_2d_Sequence::name, atleast_2d_Sequence::overload_name).typed<atleast_2d_Sequence::schema>();
}

// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_2d_Sequence::call(at::TensorList tensors) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_atleast_2d_Sequence_typed_handle();
    return handle.call(tensors);
}

// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_2d_Sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_atleast_2d_Sequence_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::atleast_3d(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atleast_3d::schema> create_atleast_3d_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atleast_3d::name, atleast_3d::overload_name).typed<atleast_3d::schema>();
}

// aten::atleast_3d(Tensor self) -> Tensor
at::Tensor atleast_3d::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_atleast_3d_typed_handle();
    return handle.call(self);
}

// aten::atleast_3d(Tensor self) -> Tensor
at::Tensor atleast_3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_atleast_3d_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<atleast_3d_Sequence::schema> create_atleast_3d_Sequence_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(atleast_3d_Sequence::name, atleast_3d_Sequence::overload_name).typed<atleast_3d_Sequence::schema>();
}

// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_3d_Sequence::call(at::TensorList tensors) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_atleast_3d_Sequence_typed_handle();
    return handle.call(tensors);
}

// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> atleast_3d_Sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_atleast_3d_Sequence_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<baddbmm::schema> create_baddbmm_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(baddbmm::name, baddbmm::overload_name).typed<baddbmm::schema>();
}

// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor baddbmm::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_baddbmm_typed_handle();
    return handle.call(self, batch1, batch2, beta, alpha);
}

// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor baddbmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_baddbmm_typed_handle();
    return handle.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<baddbmm_::schema> create_baddbmm__typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(baddbmm_::name, baddbmm_::overload_name).typed<baddbmm_::schema>();
}

// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & baddbmm_::call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_baddbmm__typed_handle();
    return handle.call(self, batch1, batch2, beta, alpha);
}

// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & baddbmm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_baddbmm__typed_handle();
    return handle.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<baddbmm_out::schema> create_baddbmm_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(baddbmm_out::name, baddbmm_out::overload_name).typed<baddbmm_out::schema>();
}

// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & baddbmm_out::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_baddbmm_out_typed_handle();
    return handle.call(self, batch1, batch2, beta, alpha, out);
}

// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & baddbmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_baddbmm_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out);
}

// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window::schema> create_bartlett_window_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bartlett_window::name, bartlett_window::overload_name).typed<bartlett_window::schema>();
}

// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window::call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bartlett_window_typed_handle();
    return handle.call(window_length, dtype, layout, device, pin_memory);
}

// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bartlett_window_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window_periodic::schema> create_bartlett_window_periodic_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bartlett_window_periodic::name, bartlett_window_periodic::overload_name).typed<bartlett_window_periodic::schema>();
}

// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window_periodic::call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bartlett_window_periodic_typed_handle();
    return handle.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor bartlett_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bartlett_window_periodic_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm::schema> create_batch_norm_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm::name, batch_norm::overload_name).typed<batch_norm::schema>();
}

// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor batch_norm::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_batch_norm_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_batch_norm_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_batch_norm::schema> create_quantized_batch_norm_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(quantized_batch_norm::name, quantized_batch_norm::overload_name).typed<quantized_batch_norm::schema>();
}

// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
at::Tensor quantized_batch_norm::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_quantized_batch_norm_typed_handle();
    return handle.call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
}

// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
at::Tensor quantized_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_quantized_batch_norm_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point);
}

// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_impl_index::schema> create__batch_norm_impl_index_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_impl_index::name, _batch_norm_impl_index::overload_name).typed<_batch_norm_impl_index::schema>();
}

// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create__batch_norm_impl_index_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create__batch_norm_impl_index_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
}

// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_impl_index_backward::schema> create__batch_norm_impl_index_backward_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_impl_index_backward::name, _batch_norm_impl_index_backward::overload_name).typed<_batch_norm_impl_index_backward::schema>();
}

// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward::call(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create__batch_norm_impl_index_backward_typed_handle();
    return handle.call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}

// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create__batch_norm_impl_index_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
}

// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli::schema> create_bernoulli_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bernoulli::name, bernoulli::overload_name).typed<bernoulli::schema>();
}

// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli::call(const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bernoulli_typed_handle();
    return handle.call(self, generator);
}

// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bernoulli_typed_handle();
    return handle.redispatch(dispatchKeySet, self, generator);
}

// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_out::schema> create_bernoulli_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bernoulli_out::name, bernoulli_out::overload_name).typed<bernoulli_out::schema>();
}

// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_out::call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bernoulli_out_typed_handle();
    return handle.call(self, generator, out);
}

// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bernoulli_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bernoulli_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, generator, out);
}

// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli__Tensor::schema> create_bernoulli__Tensor_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bernoulli__Tensor::name, bernoulli__Tensor::overload_name).typed<bernoulli__Tensor::schema>();
}

// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__Tensor::call(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bernoulli__Tensor_typed_handle();
    return handle.call(self, p, generator);
}

// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bernoulli__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli__float::schema> create_bernoulli__float_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bernoulli__float::name, bernoulli__float::overload_name).typed<bernoulli__float::schema>();
}

// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__float::call(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bernoulli__float_typed_handle();
    return handle.call(self, p, generator);
}

// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & bernoulli__float::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bernoulli__float_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_p::schema> create_bernoulli_p_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bernoulli_p::name, bernoulli_p::overload_name).typed<bernoulli_p::schema>();
}

// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli_p::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bernoulli_p_typed_handle();
    return handle.call(self, p, generator);
}

// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor bernoulli_p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bernoulli_p_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bilinear::schema> create_bilinear_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bilinear::name, bilinear::overload_name).typed<bilinear::schema>();
}

// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor bilinear::call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_bilinear_typed_handle();
    return handle.call(input1, input2, weight, bias);
}

// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor bilinear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_bilinear_typed_handle();
    return handle.redispatch(dispatchKeySet, input1, input2, weight, bias);
}

// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy::schema> create_binary_cross_entropy_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(binary_cross_entropy::name, binary_cross_entropy::overload_name).typed<binary_cross_entropy::schema>();
}

// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_binary_cross_entropy_typed_handle();
    return handle.call(self, target, weight, reduction);
}

// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_binary_cross_entropy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, weight, reduction);
}

// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_out::schema> create_binary_cross_entropy_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(binary_cross_entropy_out::name, binary_cross_entropy_out::overload_name).typed<binary_cross_entropy_out::schema>();
}

// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binary_cross_entropy_out::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_binary_cross_entropy_out_typed_handle();
    return handle.call(self, target, weight, reduction, out);
}

// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binary_cross_entropy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_binary_cross_entropy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, weight, reduction, out);
}

// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_backward::schema> create_binary_cross_entropy_backward_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(binary_cross_entropy_backward::name, binary_cross_entropy_backward::overload_name).typed<binary_cross_entropy_backward::schema>();
}

// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_binary_cross_entropy_backward_typed_handle();
    return handle.call(grad_output, self, target, weight, reduction);
}

// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_binary_cross_entropy_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction);
}

// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_backward_grad_input::schema> create_binary_cross_entropy_backward_grad_input_typed_handle() {
  // Resolve the operator schema in the global dispatcher and bind it to a
  // statically-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(binary_cross_entropy_backward_grad_input::name, binary_cross_entropy_backward_grad_input::overload_name).typed<binary_cross_entropy_backward_grad_input::schema>();
}

// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & binary_cross_entropy_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    // Handle is resolved once and cached (thread-safe function-local static).
    static auto handle = create_binary_cross_entropy_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, target, weight, reduction, grad_input);
}

// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & binary_cross_entropy_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    // Re-enter dispatch with an explicitly supplied key set.
    static auto handle = create_binary_cross_entropy_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, grad_input);
}

// Generated dispatcher glue for aten::binary_cross_entropy_with_logits: schema
// resolved once by create_*_typed_handle() (C10_NOINLINE cold path), cached in a
// function-local static by call()/redispatch(); redispatch() additionally
// forwards an explicit DispatchKeySet to continue dispatch below the current key.
// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_with_logits::schema> create_binary_cross_entropy_with_logits_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binary_cross_entropy_with_logits::name, binary_cross_entropy_with_logits::overload_name)
      .typed<binary_cross_entropy_with_logits::schema>();
}

// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_with_logits::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction) {
    
    static auto op = create_binary_cross_entropy_with_logits_typed_handle();
    return op.call(self, target, weight, pos_weight, reduction);
}

// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
at::Tensor binary_cross_entropy_with_logits::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction) {
    
    static auto op = create_binary_cross_entropy_with_logits_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction);
}

// Generated dispatcher glue for aten::bincount: schema resolved once by
// create_*_typed_handle() (C10_NOINLINE cold path), cached in a function-local
// static by call()/redispatch(); redispatch() forwards an explicit
// DispatchKeySet to continue dispatch below the current key.
// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bincount::schema> create_bincount_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bincount::name, bincount::overload_name)
      .typed<bincount::schema>();
}

// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
at::Tensor bincount::call(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength) {
    
    static auto op = create_bincount_typed_handle();
    return op.call(self, weights, minlength);
}

// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
at::Tensor bincount::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength) {
    
    static auto op = create_bincount_typed_handle();
    return op.redispatch(dispatchKeySet, self, weights, minlength);
}

// Generated dispatcher glue for aten::bitwise_not, its in-place variant
// bitwise_not_, and the out-variant bitwise_not.out: each schema is resolved
// once by its create_*_typed_handle() (C10_NOINLINE cold path) and cached in a
// function-local static by call()/redispatch(); redispatch() forwards an
// explicit DispatchKeySet to continue dispatch below the current key.
// aten::bitwise_not(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_not::schema> create_bitwise_not_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_not::name, bitwise_not::overload_name)
      .typed<bitwise_not::schema>();
}

// aten::bitwise_not(Tensor self) -> Tensor
at::Tensor bitwise_not::call(const at::Tensor & self) {
    
    static auto op = create_bitwise_not_typed_handle();
    return op.call(self);
}

// aten::bitwise_not(Tensor self) -> Tensor
at::Tensor bitwise_not::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_bitwise_not_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_not_::schema> create_bitwise_not__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_not_::name, bitwise_not_::overload_name)
      .typed<bitwise_not_::schema>();
}

// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & bitwise_not_::call(at::Tensor & self) {
    
    static auto op = create_bitwise_not__typed_handle();
    return op.call(self);
}

// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & bitwise_not_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_bitwise_not__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_not_out::schema> create_bitwise_not_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_not_out::name, bitwise_not_out::overload_name)
      .typed<bitwise_not_out::schema>();
}

// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_not_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_bitwise_not_out_typed_handle();
    return op.call(self, out);
}

// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_not_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_bitwise_not_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// Generated dispatcher glue for the aten::copysign overload family (.out,
// .Tensor, in-place .Tensor, .Scalar, in-place .Scalar, .Scalar_out): each
// overload's schema is resolved once by its create_*_typed_handle()
// (C10_NOINLINE cold path) and cached in a function-local static by
// call()/redispatch(); redispatch() forwards an explicit DispatchKeySet to
// continue dispatch below the current key.
// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign_out::schema> create_copysign_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_out::name, copysign_out::overload_name)
      .typed<copysign_out::schema>();
}

// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_copysign_out_typed_handle();
    return op.call(self, other, out);
}

// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_copysign_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<copysign_Tensor::schema> create_copysign_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_Tensor::name, copysign_Tensor::overload_name)
      .typed<copysign_Tensor::schema>();
}

// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor copysign_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_copysign_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor copysign_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_copysign_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign__Tensor::schema> create_copysign__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign__Tensor::name, copysign__Tensor::overload_name)
      .typed<copysign__Tensor::schema>();
}

// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & copysign__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_copysign__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & copysign__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_copysign__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<copysign_Scalar::schema> create_copysign_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_Scalar::name, copysign_Scalar::overload_name)
      .typed<copysign_Scalar::schema>();
}

// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor copysign_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_copysign_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor copysign_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_copysign_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign__Scalar::schema> create_copysign__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign__Scalar::name, copysign__Scalar::overload_name)
      .typed<copysign__Scalar::schema>();
}

// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & copysign__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_copysign__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & copysign__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_copysign__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<copysign_Scalar_out::schema> create_copysign_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copysign_Scalar_out::name, copysign_Scalar_out::overload_name)
      .typed<copysign_Scalar_out::schema>();
}

// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_copysign_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copysign_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_copysign_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// Generated dispatcher glue for aten::_lazy_clone: schema resolved once by
// create_*_typed_handle() (C10_NOINLINE cold path), cached in a function-local
// static by call()/redispatch(); redispatch() forwards an explicit
// DispatchKeySet to continue dispatch below the current key.
// aten::_lazy_clone(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_lazy_clone::schema> create__lazy_clone_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_lazy_clone::name, _lazy_clone::overload_name)
      .typed<_lazy_clone::schema>();
}

// aten::_lazy_clone(Tensor self) -> Tensor
at::Tensor _lazy_clone::call(const at::Tensor & self) {
    
    static auto op = create__lazy_clone_typed_handle();
    return op.call(self);
}

// aten::_lazy_clone(Tensor self) -> Tensor
at::Tensor _lazy_clone::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__lazy_clone_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// Generated dispatcher glue for aten::logical_not, in-place logical_not_, and
// logical_not.out: each schema is resolved once by its create_*_typed_handle()
// (C10_NOINLINE cold path) and cached in a function-local static by
// call()/redispatch(); redispatch() forwards an explicit DispatchKeySet to
// continue dispatch below the current key.
// aten::logical_not(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_not::schema> create_logical_not_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_not::name, logical_not::overload_name)
      .typed<logical_not::schema>();
}

// aten::logical_not(Tensor self) -> Tensor
at::Tensor logical_not::call(const at::Tensor & self) {
    
    static auto op = create_logical_not_typed_handle();
    return op.call(self);
}

// aten::logical_not(Tensor self) -> Tensor
at::Tensor logical_not::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_logical_not_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_not_::schema> create_logical_not__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_not_::name, logical_not_::overload_name)
      .typed<logical_not_::schema>();
}

// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & logical_not_::call(at::Tensor & self) {
    
    static auto op = create_logical_not__typed_handle();
    return op.call(self);
}

// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & logical_not_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_logical_not__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_not_out::schema> create_logical_not_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_not_out::name, logical_not_out::overload_name)
      .typed<logical_not_out::schema>();
}

// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_not_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_logical_not_out_typed_handle();
    return op.call(self, out);
}

// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_not_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_logical_not_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// Generated dispatcher glue for aten::logical_xor, in-place logical_xor_, and
// logical_xor.out: each schema is resolved once by its create_*_typed_handle()
// (C10_NOINLINE cold path) and cached in a function-local static by
// call()/redispatch(); redispatch() forwards an explicit DispatchKeySet to
// continue dispatch below the current key.
// aten::logical_xor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_xor::schema> create_logical_xor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_xor::name, logical_xor::overload_name)
      .typed<logical_xor::schema>();
}

// aten::logical_xor(Tensor self, Tensor other) -> Tensor
at::Tensor logical_xor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_xor_typed_handle();
    return op.call(self, other);
}

// aten::logical_xor(Tensor self, Tensor other) -> Tensor
at::Tensor logical_xor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_xor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_xor_::schema> create_logical_xor__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_xor_::name, logical_xor_::overload_name)
      .typed<logical_xor_::schema>();
}

// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_xor_::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_xor__typed_handle();
    return op.call(self, other);
}

// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_xor_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_xor__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_xor_out::schema> create_logical_xor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_xor_out::name, logical_xor_out::overload_name)
      .typed<logical_xor_out::schema>();
}

// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_xor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logical_xor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_xor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logical_xor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// Generated dispatcher glue for aten::logical_and, in-place logical_and_, and
// logical_and.out: each schema is resolved once by its create_*_typed_handle()
// (C10_NOINLINE cold path) and cached in a function-local static by
// call()/redispatch(); redispatch() forwards an explicit DispatchKeySet to
// continue dispatch below the current key.
// aten::logical_and(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_and::schema> create_logical_and_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_and::name, logical_and::overload_name)
      .typed<logical_and::schema>();
}

// aten::logical_and(Tensor self, Tensor other) -> Tensor
at::Tensor logical_and::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_and_typed_handle();
    return op.call(self, other);
}

// aten::logical_and(Tensor self, Tensor other) -> Tensor
at::Tensor logical_and::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_and_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_and_::schema> create_logical_and__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_and_::name, logical_and_::overload_name)
      .typed<logical_and_::schema>();
}

// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_and_::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_and__typed_handle();
    return op.call(self, other);
}

// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_and_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_and__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_and_out::schema> create_logical_and_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_and_out::name, logical_and_out::overload_name)
      .typed<logical_and_out::schema>();
}

// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_and_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logical_and_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_and_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logical_and_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// Generated dispatcher glue for aten::logical_or, in-place logical_or_, and
// logical_or.out: each schema is resolved once by its create_*_typed_handle()
// (C10_NOINLINE cold path) and cached in a function-local static by
// call()/redispatch(); redispatch() forwards an explicit DispatchKeySet to
// continue dispatch below the current key.
// aten::logical_or(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logical_or::schema> create_logical_or_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_or::name, logical_or::overload_name)
      .typed<logical_or::schema>();
}

// aten::logical_or(Tensor self, Tensor other) -> Tensor
at::Tensor logical_or::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_or_typed_handle();
    return op.call(self, other);
}

// aten::logical_or(Tensor self, Tensor other) -> Tensor
at::Tensor logical_or::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_or_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_or_::schema> create_logical_or__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_or_::name, logical_or_::overload_name)
      .typed<logical_or_::schema>();
}

// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_or_::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_or__typed_handle();
    return op.call(self, other);
}

// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & logical_or_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logical_or__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logical_or_out::schema> create_logical_or_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logical_or_out::name, logical_or_out::overload_name)
      .typed<logical_or_out::schema>();
}

// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_or_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logical_or_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logical_or_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logical_or_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// Generated dispatcher glue for the factory aten::blackman_window and its
// .periodic overload (TensorOptions expanded into individual optional
// dtype/layout/device/pin_memory parameters by codegen): schema resolved once
// by create_*_typed_handle() (C10_NOINLINE cold path), cached in a
// function-local static by call()/redispatch(); redispatch() forwards an
// explicit DispatchKeySet to continue dispatch below the current key.
// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window::schema> create_blackman_window_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window::name, blackman_window::overload_name)
      .typed<blackman_window::schema>();
}

// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window::call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_blackman_window_typed_handle();
    return op.call(window_length, dtype, layout, device, pin_memory);
}

// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_blackman_window_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window_periodic::schema> create_blackman_window_periodic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window_periodic::name, blackman_window_periodic::overload_name)
      .typed<blackman_window_periodic::schema>();
}

// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window_periodic::call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_blackman_window_periodic_typed_handle();
    return op.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor blackman_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_blackman_window_periodic_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

// Generated dispatcher glue for aten::bmm and aten::bmm.out: schema resolved
// once by create_*_typed_handle() (C10_NOINLINE cold path), cached in a
// function-local static by call()/redispatch(); redispatch() forwards an
// explicit DispatchKeySet to continue dispatch below the current key.
// aten::bmm(Tensor self, Tensor mat2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bmm::schema> create_bmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bmm::name, bmm::overload_name)
      .typed<bmm::schema>();
}

// aten::bmm(Tensor self, Tensor mat2) -> Tensor
at::Tensor bmm::call(const at::Tensor & self, const at::Tensor & mat2) {
    
    static auto op = create_bmm_typed_handle();
    return op.call(self, mat2);
}

// aten::bmm(Tensor self, Tensor mat2) -> Tensor
at::Tensor bmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
    
    static auto op = create_bmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2);
}

// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bmm_out::schema> create_bmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bmm_out::name, bmm_out::overload_name)
      .typed<bmm_out::schema>();
}

// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bmm_out::call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    
    static auto op = create_bmm_out_typed_handle();
    return op.call(self, mat2, out);
}

// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    
    static auto op = create_bmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, out);
}

// Generated dispatcher glue for aten::broadcast_tensors (Tensor[] -> Tensor[]):
// schema resolved once by create_*_typed_handle() (C10_NOINLINE cold path),
// cached in a function-local static by call()/redispatch(); redispatch()
// forwards an explicit DispatchKeySet to continue dispatch below the current key.
// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<broadcast_tensors::schema> create_broadcast_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(broadcast_tensors::name, broadcast_tensors::overload_name)
      .typed<broadcast_tensors::schema>();
}

// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> broadcast_tensors::call(at::TensorList tensors) {
    
    static auto op = create_broadcast_tensors_typed_handle();
    return op.call(tensors);
}

// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> broadcast_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    
    static auto op = create_broadcast_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// Generated dispatcher glue for aten::broadcast_to (size is SymInt[] in the
// schema, hence the c10::SymIntArrayRef parameter): schema resolved once by
// create_*_typed_handle() (C10_NOINLINE cold path), cached in a function-local
// static by call()/redispatch(); redispatch() forwards an explicit
// DispatchKeySet to continue dispatch below the current key.
// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<broadcast_to::schema> create_broadcast_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(broadcast_to::name, broadcast_to::overload_name)
      .typed<broadcast_to::schema>();
}

// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor broadcast_to::call(const at::Tensor & self, c10::SymIntArrayRef size) {
    
    static auto op = create_broadcast_to_typed_handle();
    return op.call(self, size);
}

// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor broadcast_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
    
    static auto op = create_broadcast_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

// Generated dispatcher glue for aten::_sparse_broadcast_to (plain int[] size,
// i.e. at::IntArrayRef — unlike broadcast_to this schema is not SymInt-based):
// schema resolved once by create_*_typed_handle() (C10_NOINLINE cold path),
// cached in a function-local static by call()/redispatch(); redispatch()
// forwards an explicit DispatchKeySet to continue dispatch below the current key.
// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_broadcast_to::schema> create__sparse_broadcast_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_broadcast_to::name, _sparse_broadcast_to::overload_name)
      .typed<_sparse_broadcast_to::schema>();
}

// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
at::Tensor _sparse_broadcast_to::call(const at::Tensor & self, at::IntArrayRef size) {
    
    static auto op = create__sparse_broadcast_to_typed_handle();
    return op.call(self, size);
}

// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
at::Tensor _sparse_broadcast_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
    
    static auto op = create__sparse_broadcast_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

// Generated dispatcher glue for aten::cat, cat.out, and cat.names. Note the
// list parameter types differ by overload as emitted by codegen: cat/cat.out
// take const at::ITensorListRef & while cat.names takes at::TensorList. Each
// schema is resolved once by its create_*_typed_handle() (C10_NOINLINE cold
// path) and cached in a function-local static by call()/redispatch();
// redispatch() forwards an explicit DispatchKeySet to continue dispatch below
// the current key.
// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cat::schema> create_cat_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cat::name, cat::overload_name)
      .typed<cat::schema>();
}

// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor cat::call(const at::ITensorListRef & tensors, int64_t dim) {
    
    static auto op = create_cat_typed_handle();
    return op.call(tensors, dim);
}

// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor cat::redispatch(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim) {
    
    static auto op = create_cat_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cat_out::schema> create_cat_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cat_out::name, cat_out::overload_name)
      .typed<cat_out::schema>();
}

// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cat_out::call(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
    
    static auto op = create_cat_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cat_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
    
    static auto op = create_cat_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cat_names::schema> create_cat_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cat_names::name, cat_names::overload_name)
      .typed<cat_names::schema>();
}

// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor cat_names::call(at::TensorList tensors, at::Dimname dim) {
    
    static auto op = create_cat_names_typed_handle();
    return op.call(tensors, dim);
}

// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor cat_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {
    
    static auto op = create_cat_names_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim);
}

// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cat_names_out::schema> create_cat_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cat_names_out::name, cat_names_out::overload_name)
      .typed<cat_names_out::schema>();
}

// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cat_names_out::call(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    
    static auto op = create_cat_names_out_typed_handle();
    return op.call(tensors, dim, out);
}

// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cat_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    
    static auto op = create_cat_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
// Looks up aten::concat in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concat::schema> create_concat_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concat::name, concat::overload_name).typed<concat::schema>();
}

// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concat::call(at::TensorList tensors, int64_t dim) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_concat_typed_handle();
    return handle.call(tensors, dim);
}

// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concat::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {
    static auto handle = create_concat_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim);
}

// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::concat.out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concat_out::schema> create_concat_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concat_out::name, concat_out::overload_name).typed<concat_out::schema>();
}

// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    static auto handle = create_concat_out_typed_handle();
    return handle.call(tensors, dim, out);
}

// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
    static auto handle = create_concat_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
// Looks up aten::concat.names in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concat_names::schema> create_concat_names_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concat_names::name, concat_names::overload_name).typed<concat_names::schema>();
}

// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concat_names::call(at::TensorList tensors, at::Dimname dim) {
    static auto handle = create_concat_names_typed_handle();
    return handle.call(tensors, dim);
}

// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concat_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {
    static auto handle = create_concat_names_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim);
}

// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::concat.names_out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concat_names_out::schema> create_concat_names_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concat_names_out::name, concat_names_out::overload_name).typed<concat_names_out::schema>();
}

// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_names_out::call(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    static auto handle = create_concat_names_out_typed_handle();
    return handle.call(tensors, dim, out);
}

// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concat_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    static auto handle = create_concat_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
// Looks up aten::concatenate in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concatenate::schema> create_concatenate_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concatenate::name, concatenate::overload_name).typed<concatenate::schema>();
}

// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concatenate::call(at::TensorList tensors, int64_t dim) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_concatenate_typed_handle();
    return handle.call(tensors, dim);
}

// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor concatenate::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {
    static auto handle = create_concatenate_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim);
}

// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::concatenate.out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concatenate_out::schema> create_concatenate_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concatenate_out::name, concatenate_out::overload_name).typed<concatenate_out::schema>();
}

// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    static auto handle = create_concatenate_out_typed_handle();
    return handle.call(tensors, dim, out);
}

// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
    static auto handle = create_concatenate_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
// Looks up aten::concatenate.names in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concatenate_names::schema> create_concatenate_names_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concatenate_names::name, concatenate_names::overload_name).typed<concatenate_names::schema>();
}

// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concatenate_names::call(at::TensorList tensors, at::Dimname dim) {
    static auto handle = create_concatenate_names_typed_handle();
    return handle.call(tensors, dim);
}

// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
at::Tensor concatenate_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {
    static auto handle = create_concatenate_names_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim);
}

// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::concatenate.names_out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<concatenate_names_out::schema> create_concatenate_names_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(concatenate_names_out::name, concatenate_names_out::overload_name).typed<concatenate_names_out::schema>();
}

// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_names_out::call(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    static auto handle = create_concatenate_names_out_typed_handle();
    return handle.call(tensors, dim, out);
}

// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & concatenate_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
    static auto handle = create_concatenate_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::block_diag(Tensor[] tensors) -> Tensor
// Looks up aten::block_diag in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<block_diag::schema> create_block_diag_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(block_diag::name, block_diag::overload_name).typed<block_diag::schema>();
}

// aten::block_diag(Tensor[] tensors) -> Tensor
at::Tensor block_diag::call(at::TensorList tensors) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_block_diag_typed_handle();
    return handle.call(tensors);
}

// aten::block_diag(Tensor[] tensors) -> Tensor
at::Tensor block_diag::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static auto handle = create_block_diag_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::ceil(Tensor self) -> Tensor
// Looks up aten::ceil in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ceil::schema> create_ceil_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ceil::name, ceil::overload_name).typed<ceil::schema>();
}

// aten::ceil(Tensor self) -> Tensor
at::Tensor ceil::call(const at::Tensor & self) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_ceil_typed_handle();
    return handle.call(self);
}

// aten::ceil(Tensor self) -> Tensor
at::Tensor ceil::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_ceil_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
// Looks up aten::ceil_ in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ceil_::schema> create_ceil__typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ceil_::name, ceil_::overload_name).typed<ceil_::schema>();
}

// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & ceil_::call(at::Tensor & self) {
    static auto handle = create_ceil__typed_handle();
    return handle.call(self);
}

// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & ceil_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto handle = create_ceil__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::ceil.out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<ceil_out::schema> create_ceil_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ceil_out::name, ceil_out::overload_name).typed<ceil_out::schema>();
}

// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ceil_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_ceil_out_typed_handle();
    return handle.call(self, out);
}

// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ceil_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_ceil_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::chain_matmul(Tensor[] matrices) -> Tensor
// Looks up aten::chain_matmul in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<chain_matmul::schema> create_chain_matmul_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(chain_matmul::name, chain_matmul::overload_name).typed<chain_matmul::schema>();
}

// aten::chain_matmul(Tensor[] matrices) -> Tensor
at::Tensor chain_matmul::call(at::TensorList matrices) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_chain_matmul_typed_handle();
    return handle.call(matrices);
}

// aten::chain_matmul(Tensor[] matrices) -> Tensor
at::Tensor chain_matmul::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices) {
    static auto handle = create_chain_matmul_typed_handle();
    return handle.redispatch(dispatchKeySet, matrices);
}

// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::chain_matmul.out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<chain_matmul_out::schema> create_chain_matmul_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(chain_matmul_out::name, chain_matmul_out::overload_name).typed<chain_matmul_out::schema>();
}

// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & chain_matmul_out::call(at::TensorList matrices, at::Tensor & out) {
    static auto handle = create_chain_matmul_out_typed_handle();
    return handle.call(matrices, out);
}

// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & chain_matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out) {
    static auto handle = create_chain_matmul_out_typed_handle();
    return handle.redispatch(dispatchKeySet, matrices, out);
}

// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
// Looks up aten::unsafe_chunk in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_chunk::schema> create_unsafe_chunk_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unsafe_chunk::name, unsafe_chunk::overload_name).typed<unsafe_chunk::schema>();
}

// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_chunk::call(const at::Tensor & self, int64_t chunks, int64_t dim) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_unsafe_chunk_typed_handle();
    return handle.call(self, chunks, dim);
}

// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_chunk::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim) {
    static auto handle = create_unsafe_chunk_typed_handle();
    return handle.redispatch(dispatchKeySet, self, chunks, dim);
}

// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
// Looks up aten::chunk in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<chunk::schema> create_chunk_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(chunk::name, chunk::overload_name).typed<chunk::schema>();
}

// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> chunk::call(const at::Tensor & self, int64_t chunks, int64_t dim) {
    static auto handle = create_chunk_typed_handle();
    return handle.call(self, chunks, dim);
}

// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> chunk::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim) {
    static auto handle = create_chunk_typed_handle();
    return handle.redispatch(dispatchKeySet, self, chunks, dim);
}

// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
// Looks up aten::tensor_split.sections in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<tensor_split_sections::schema> create_tensor_split_sections_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tensor_split_sections::name, tensor_split_sections::overload_name).typed<tensor_split_sections::schema>();
}

// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_sections::call(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_tensor_split_sections_typed_handle();
    return handle.call(self, sections, dim);
}

// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_sections::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt sections, int64_t dim) {
    static auto handle = create_tensor_split_sections_typed_handle();
    return handle.redispatch(dispatchKeySet, self, sections, dim);
}

// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
// Looks up aten::tensor_split.indices in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<tensor_split_indices::schema> create_tensor_split_indices_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tensor_split_indices::name, tensor_split_indices::overload_name).typed<tensor_split_indices::schema>();
}

// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_indices::call(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
    static auto handle = create_tensor_split_indices_typed_handle();
    return handle.call(self, indices, dim);
}

// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
    static auto handle = create_tensor_split_indices_typed_handle();
    return handle.redispatch(dispatchKeySet, self, indices, dim);
}

// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
// Looks up aten::tensor_split.tensor_indices_or_sections in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<tensor_split_tensor_indices_or_sections::schema> create_tensor_split_tensor_indices_or_sections_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tensor_split_tensor_indices_or_sections::name, tensor_split_tensor_indices_or_sections::overload_name).typed<tensor_split_tensor_indices_or_sections::schema>();
}

// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections::call(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
    static auto handle = create_tensor_split_tensor_indices_or_sections_typed_handle();
    return handle.call(self, tensor_indices_or_sections, dim);
}

// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
    static auto handle = create_tensor_split_tensor_indices_or_sections_typed_handle();
    return handle.redispatch(dispatchKeySet, self, tensor_indices_or_sections, dim);
}

// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
// Looks up aten::clamp in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp::schema> create_clamp_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp::name, clamp::overload_name).typed<clamp::schema>();
}

// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
at::Tensor clamp::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_clamp_typed_handle();
    return handle.call(self, min, max);
}

// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
at::Tensor clamp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    static auto handle = create_clamp_typed_handle();
    return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
// Looks up aten::clamp.Tensor in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_Tensor::schema> create_clamp_Tensor_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_Tensor::name, clamp_Tensor::overload_name).typed<clamp_Tensor::schema>();
}

// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
at::Tensor clamp_Tensor::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    static auto handle = create_clamp_Tensor_typed_handle();
    return handle.call(self, min, max);
}

// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
at::Tensor clamp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    static auto handle = create_clamp_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
// Looks up aten::clamp_ in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_::schema> create_clamp__typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_::name, clamp_::overload_name).typed<clamp_::schema>();
}

// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
at::Tensor & clamp_::call(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    static auto handle = create_clamp__typed_handle();
    return handle.call(self, min, max);
}

// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
at::Tensor & clamp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
    static auto handle = create_clamp__typed_handle();
    return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
// Looks up aten::clamp_.Tensor in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp__Tensor::schema> create_clamp__Tensor_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp__Tensor::name, clamp__Tensor::overload_name).typed<clamp__Tensor::schema>();
}

// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
at::Tensor & clamp__Tensor::call(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    static auto handle = create_clamp__Tensor_typed_handle();
    return handle.call(self, min, max);
}

// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
at::Tensor & clamp__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
    static auto handle = create_clamp__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::clamp.out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_out::schema> create_clamp_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_out::name, clamp_out::overload_name).typed<clamp_out::schema>();
}

// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
    static auto handle = create_clamp_out_typed_handle();
    return handle.call(self, min, max, out);
}

// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
    static auto handle = create_clamp_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::clamp.Tensor_out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_Tensor_out::schema> create_clamp_Tensor_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_Tensor_out::name, clamp_Tensor_out::overload_name).typed<clamp_Tensor_out::schema>();
}

// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_Tensor_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
    static auto handle = create_clamp_Tensor_out_typed_handle();
    return handle.call(self, min, max, out);
}

// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
    static auto handle = create_clamp_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::clamp_max(Tensor self, Scalar max) -> Tensor
// Looks up aten::clamp_max in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max::schema> create_clamp_max_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_max::name, clamp_max::overload_name).typed<clamp_max::schema>();
}

// aten::clamp_max(Tensor self, Scalar max) -> Tensor
at::Tensor clamp_max::call(const at::Tensor & self, const at::Scalar & max) {
    // The typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_clamp_max_typed_handle();
    return handle.call(self, max);
}

// aten::clamp_max(Tensor self, Scalar max) -> Tensor
at::Tensor clamp_max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max) {
    static auto handle = create_clamp_max_typed_handle();
    return handle.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
// Looks up aten::clamp_max.Tensor in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_Tensor::schema> create_clamp_max_Tensor_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_max_Tensor::name, clamp_max_Tensor::overload_name).typed<clamp_max_Tensor::schema>();
}

// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
at::Tensor clamp_max_Tensor::call(const at::Tensor & self, const at::Tensor & max) {
    static auto handle = create_clamp_max_Tensor_typed_handle();
    return handle.call(self, max);
}

// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
at::Tensor clamp_max_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max) {
    static auto handle = create_clamp_max_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
// Looks up aten::clamp_max_ in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_::schema> create_clamp_max__typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_max_::name, clamp_max_::overload_name).typed<clamp_max_::schema>();
}

// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
at::Tensor & clamp_max_::call(at::Tensor & self, const at::Scalar & max) {
    static auto handle = create_clamp_max__typed_handle();
    return handle.call(self, max);
}

// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
at::Tensor & clamp_max_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max) {
    static auto handle = create_clamp_max__typed_handle();
    return handle.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
// Looks up aten::clamp_max_.Tensor in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max__Tensor::schema> create_clamp_max__Tensor_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_max__Tensor::name, clamp_max__Tensor::overload_name).typed<clamp_max__Tensor::schema>();
}

// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
at::Tensor & clamp_max__Tensor::call(at::Tensor & self, const at::Tensor & max) {
    static auto handle = create_clamp_max__Tensor_typed_handle();
    return handle.call(self, max);
}

// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
at::Tensor & clamp_max__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max) {
    static auto handle = create_clamp_max__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, max);
}

// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::clamp_max.out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_out::schema> create_clamp_max_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_max_out::name, clamp_max_out::overload_name).typed<clamp_max_out::schema>();
}

// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_out::call(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
    static auto handle = create_clamp_max_out_typed_handle();
    return handle.call(self, max, out);
}

// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
    static auto handle = create_clamp_max_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, max, out);
}

// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::clamp_max.Tensor_out in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_max_Tensor_out::schema> create_clamp_max_Tensor_out_typed_handle() {
  auto&& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_max_Tensor_out::name, clamp_max_Tensor_out::overload_name).typed<clamp_max_Tensor_out::schema>();
}

// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_Tensor_out::call(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
    static auto handle = create_clamp_max_Tensor_out_typed_handle();
    return handle.call(self, max, out);
}

// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & clamp_max_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
    static auto handle = create_clamp_max_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, max, out);
}

// aten::clamp_min(Tensor self, Scalar min) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min::schema> create_clamp_min_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_min::name, clamp_min::overload_name)
                   .typed<clamp_min::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor clamp_min::call(const at::Tensor & self, const at::Scalar & min) {
  static auto handle = create_clamp_min_typed_handle();
  return handle.call(self, min);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor clamp_min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min) {
  static auto handle = create_clamp_min_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min);
}

// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_Tensor::schema> create_clamp_min_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_min_Tensor::name, clamp_min_Tensor::overload_name)
                   .typed<clamp_min_Tensor::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor clamp_min_Tensor::call(const at::Tensor & self, const at::Tensor & min) {
  static auto handle = create_clamp_min_Tensor_typed_handle();
  return handle.call(self, min);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor clamp_min_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min) {
  static auto handle = create_clamp_min_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min);
}

// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_::schema> create_clamp_min__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_min_::name, clamp_min_::overload_name)
                   .typed<clamp_min_::schema>();
}

// Entry point (in-place variant): forwards through a cached static handle.
at::Tensor & clamp_min_::call(at::Tensor & self, const at::Scalar & min) {
  static auto handle = create_clamp_min__typed_handle();
  return handle.call(self, min);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clamp_min_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min) {
  static auto handle = create_clamp_min__typed_handle();
  return handle.redispatch(dispatchKeySet, self, min);
}

// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min__Tensor::schema> create_clamp_min__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_min__Tensor::name, clamp_min__Tensor::overload_name)
                   .typed<clamp_min__Tensor::schema>();
}

// Entry point (in-place variant): forwards through a cached static handle.
at::Tensor & clamp_min__Tensor::call(at::Tensor & self, const at::Tensor & min) {
  static auto handle = create_clamp_min__Tensor_typed_handle();
  return handle.call(self, min);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clamp_min__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min) {
  static auto handle = create_clamp_min__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min);
}

// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_out::schema> create_clamp_min_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_min_out::name, clamp_min_out::overload_name)
                   .typed<clamp_min_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & clamp_min_out::call(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
  static auto handle = create_clamp_min_out_typed_handle();
  return handle.call(self, min, out);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clamp_min_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
  static auto handle = create_clamp_min_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, out);
}

// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clamp_min_Tensor_out::schema> create_clamp_min_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clamp_min_Tensor_out::name, clamp_min_Tensor_out::overload_name)
                   .typed<clamp_min_Tensor_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & clamp_min_Tensor_out::call(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
  static auto handle = create_clamp_min_Tensor_out_typed_handle();
  return handle.call(self, min, out);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clamp_min_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
  static auto handle = create_clamp_min_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, out);
}

// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clip::schema> create_clip_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clip::name, clip::overload_name)
                   .typed<clip::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor clip::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  static auto handle = create_clip_typed_handle();
  return handle.call(self, min, max);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor clip::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  static auto handle = create_clip_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clip_Tensor::schema> create_clip_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clip_Tensor::name, clip_Tensor::overload_name)
                   .typed<clip_Tensor::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor clip_Tensor::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  static auto handle = create_clip_Tensor_typed_handle();
  return handle.call(self, min, max);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor clip_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  static auto handle = create_clip_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clip_::schema> create_clip__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clip_::name, clip_::overload_name)
                   .typed<clip_::schema>();
}

// Entry point (in-place variant): forwards through a cached static handle.
at::Tensor & clip_::call(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  static auto handle = create_clip__typed_handle();
  return handle.call(self, min, max);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clip_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
  static auto handle = create_clip__typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clip__Tensor::schema> create_clip__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clip__Tensor::name, clip__Tensor::overload_name)
                   .typed<clip__Tensor::schema>();
}

// Entry point (in-place variant): forwards through a cached static handle.
at::Tensor & clip__Tensor::call(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  static auto handle = create_clip__Tensor_typed_handle();
  return handle.call(self, min, max);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clip__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
  static auto handle = create_clip__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, max);
}

// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clip_out::schema> create_clip_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clip_out::name, clip_out::overload_name)
                   .typed<clip_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & clip_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
  static auto handle = create_clip_out_typed_handle();
  return handle.call(self, min, max, out);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clip_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
  static auto handle = create_clip_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<clip_Tensor_out::schema> create_clip_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clip_Tensor_out::name, clip_Tensor_out::overload_name)
                   .typed<clip_Tensor_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & clip_Tensor_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
  static auto handle = create_clip_Tensor_out_typed_handle();
  return handle.call(self, min, max, out);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & clip_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
  static auto handle = create_clip_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, min, max, out);
}

// aten::cudnn_is_acceptable(Tensor self) -> bool
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_is_acceptable::schema> create_cudnn_is_acceptable_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_is_acceptable::name, cudnn_is_acceptable::overload_name)
                   .typed<cudnn_is_acceptable::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
bool cudnn_is_acceptable::call(const at::Tensor & self) {
  static auto handle = create_cudnn_is_acceptable_typed_handle();
  return handle.call(self);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
bool cudnn_is_acceptable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  static auto handle = create_cudnn_is_acceptable_typed_handle();
  return handle.redispatch(dispatchKeySet, self);
}

// aten::complex(Tensor real, Tensor imag) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<complex::schema> create_complex_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(complex::name, complex::overload_name)
                   .typed<complex::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor complex::call(const at::Tensor & real, const at::Tensor & imag) {
  static auto handle = create_complex_typed_handle();
  return handle.call(real, imag);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor complex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag) {
  static auto handle = create_complex_typed_handle();
  return handle.redispatch(dispatchKeySet, real, imag);
}

// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<complex_out::schema> create_complex_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(complex_out::name, complex_out::overload_name)
                   .typed<complex_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & complex_out::call(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
  static auto handle = create_complex_out_typed_handle();
  return handle.call(real, imag, out);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & complex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
  static auto handle = create_complex_out_typed_handle();
  return handle.redispatch(dispatchKeySet, real, imag, out);
}

// aten::polar(Tensor abs, Tensor angle) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<polar::schema> create_polar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(polar::name, polar::overload_name)
                   .typed<polar::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor polar::call(const at::Tensor & abs, const at::Tensor & angle) {
  static auto handle = create_polar_typed_handle();
  return handle.call(abs, angle);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor polar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle) {
  static auto handle = create_polar_typed_handle();
  return handle.redispatch(dispatchKeySet, abs, angle);
}

// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<polar_out::schema> create_polar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(polar_out::name, polar_out::overload_name)
                   .typed<polar_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & polar_out::call(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
  static auto handle = create_polar_out_typed_handle();
  return handle.call(abs, angle, out);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor & polar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
  static auto handle = create_polar_out_typed_handle();
  return handle.redispatch(dispatchKeySet, abs, angle, out);
}

// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<constant_pad_nd::schema> create_constant_pad_nd_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(constant_pad_nd::name, constant_pad_nd::overload_name)
                   .typed<constant_pad_nd::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor constant_pad_nd::call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
  static auto handle = create_constant_pad_nd_typed_handle();
  return handle.call(self, pad, value);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor constant_pad_nd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
  static auto handle = create_constant_pad_nd_typed_handle();
  return handle.redispatch(dispatchKeySet, self, pad, value);
}

// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<contiguous::schema> create_contiguous_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(contiguous::name, contiguous::overload_name)
                   .typed<contiguous::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor contiguous::call(const at::Tensor & self, at::MemoryFormat memory_format) {
  static auto handle = create_contiguous_typed_handle();
  return handle.call(self, memory_format);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor contiguous::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::MemoryFormat memory_format) {
  static auto handle = create_contiguous_typed_handle();
  return handle.redispatch(dispatchKeySet, self, memory_format);
}

// aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<convolution::schema> create_convolution_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(convolution::name, convolution::overload_name)
                   .typed<convolution::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor convolution::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  static auto handle = create_convolution_typed_handle();
  return handle.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  static auto handle = create_convolution_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}

// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward::schema> create_convolution_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(convolution_backward::name, convolution_backward::overload_name)
                   .typed<convolution_backward::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  static auto handle = create_convolution_backward_typed_handle();
  return handle.call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  static auto handle = create_convolution_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_overrideable::schema> create_convolution_overrideable_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(convolution_overrideable::name, convolution_overrideable::overload_name)
                   .typed<convolution_overrideable::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor convolution_overrideable::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  static auto handle = create_convolution_overrideable_typed_handle();
  return handle.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor convolution_overrideable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  static auto handle = create_convolution_overrideable_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
}

// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward_overrideable::schema> create_convolution_backward_overrideable_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(convolution_backward_overrideable::name, convolution_backward_overrideable::overload_name)
                   .typed<convolution_backward_overrideable::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  static auto handle = create_convolution_backward_overrideable_typed_handle();
  return handle.call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  static auto handle = create_convolution_backward_overrideable_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<_convolution::schema> create__convolution_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convolution::name, _convolution::overload_name)
                   .typed<_convolution::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _convolution::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
  static auto handle = create__convolution_typed_handle();
  return handle.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor _convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
  static auto handle = create__convolution_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
}

// aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_deprecated::schema> create__convolution_deprecated_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convolution_deprecated::name, _convolution_deprecated::overload_name)
                   .typed<_convolution_deprecated::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _convolution_deprecated::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  static auto handle = create__convolution_deprecated_typed_handle();
  return handle.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor _convolution_deprecated::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  static auto handle = create__convolution_deprecated_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
}

// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor
//
// Resolves the typed dispatcher handle for this operator; C10_NOINLINE keeps
// the schema lookup out of the callers' instruction stream.
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_mode::schema> create__convolution_mode_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convolution_mode::name, _convolution_mode::overload_name)
                   .typed<_convolution_mode::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _convolution_mode::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  static auto handle = create__convolution_mode_typed_handle();
  return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Like call(), but passes an explicit DispatchKeySet to the dispatcher.
at::Tensor _convolution_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  static auto handle = create__convolution_mode_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Lazily resolves the typed operator handle for _convolution_double_backward.
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_double_backward::schema> create__convolution_double_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_convolution_double_backward::name, _convolution_double_backward::overload_name)
      .typed<_convolution_double_backward::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward::call(const ::std::optional<at::Tensor> & ggI, const ::std::optional<at::Tensor> & ggW, const ::std::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
    static auto handle = create__convolution_double_backward_typed_handle();
    return handle.call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & ggI, const ::std::optional<at::Tensor> & ggW, const ::std::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
    static auto handle = create__convolution_double_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
}

// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor
// Lazily resolves the typed operator handle for conv1d from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv1d::schema> create_conv1d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv1d::name, conv1d::overload_name)
      .typed<conv1d::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv1d::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv1d_typed_handle();
    return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv1d_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor
// Lazily resolves the typed operator handle for conv2d from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv2d::schema> create_conv2d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv2d::name, conv2d::overload_name)
      .typed<conv2d::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv2d::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv2d_typed_handle();
    return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv2d_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
// Lazily resolves the typed operator handle for conv3d from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv3d::schema> create_conv3d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv3d::name, conv3d::overload_name)
      .typed<conv3d::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv3d::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv3d_typed_handle();
    return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv3d_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor
// Lazily resolves the typed operator handle for conv1d.padding from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv1d_padding::schema> create_conv1d_padding_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv1d_padding::name, conv1d_padding::overload_name)
      .typed<conv1d_padding::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv1d_padding::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv1d_padding_typed_handle();
    return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv1d_padding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv1d_padding_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, str padding="valid", SymInt[2] dilation=1, SymInt groups=1) -> Tensor
// Lazily resolves the typed operator handle for conv2d.padding from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv2d_padding::schema> create_conv2d_padding_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv2d_padding::name, conv2d_padding::overload_name)
      .typed<conv2d_padding::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv2d_padding::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv2d_padding_typed_handle();
    return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv2d_padding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv2d_padding_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor
// Lazily resolves the typed operator handle for conv3d.padding from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv3d_padding::schema> create_conv3d_padding_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv3d_padding::name, conv3d_padding::overload_name)
      .typed<conv3d_padding::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv3d_padding::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv3d_padding_typed_handle();
    return handle.call(input, weight, bias, stride, padding, dilation, groups);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv3d_padding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    static auto handle = create_conv3d_padding_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups);
}

// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
// Lazily resolves the typed operator handle for conv_tbc from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv_tbc::schema> create_conv_tbc_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv_tbc::name, conv_tbc::overload_name)
      .typed<conv_tbc::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv_tbc::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
    static auto handle = create_conv_tbc_typed_handle();
    return handle.call(self, weight, bias, pad);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv_tbc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
    static auto handle = create_conv_tbc_typed_handle();
    return handle.redispatch(dispatchKeySet, self, weight, bias, pad);
}

// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
// Lazily resolves the typed operator handle for conv_tbc_backward from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv_tbc_backward::schema> create_conv_tbc_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv_tbc_backward::name, conv_tbc_backward::overload_name)
      .typed<conv_tbc_backward::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward::call(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
    static auto handle = create_conv_tbc_backward_typed_handle();
    return handle.call(self, input, weight, bias, pad);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
    static auto handle = create_conv_tbc_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, self, input, weight, bias, pad);
}

// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor
// Lazily resolves the typed operator handle for conv_transpose1d from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv_transpose1d::schema> create_conv_transpose1d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv_transpose1d::name, conv_transpose1d::overload_name)
      .typed<conv_transpose1d::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv_transpose1d::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    static auto handle = create_conv_transpose1d_typed_handle();
    return handle.call(input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv_transpose1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    static auto handle = create_conv_transpose1d_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor
// Lazily resolves the typed operator handle for conv_transpose2d.input from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv_transpose2d_input::schema> create_conv_transpose2d_input_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv_transpose2d_input::name, conv_transpose2d_input::overload_name)
      .typed<conv_transpose2d_input::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv_transpose2d_input::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    static auto handle = create_conv_transpose2d_input_typed_handle();
    return handle.call(input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv_transpose2d_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    static auto handle = create_conv_transpose2d_input_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor
// Lazily resolves the typed operator handle for conv_transpose3d.input from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<conv_transpose3d_input::schema> create_conv_transpose3d_input_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(conv_transpose3d_input::name, conv_transpose3d_input::overload_name)
      .typed<conv_transpose3d_input::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor conv_transpose3d_input::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    static auto handle = create_conv_transpose3d_input_typed_handle();
    return handle.call(input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor conv_transpose3d_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
    static auto handle = create_conv_transpose3d_input_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation);
}

// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
// Lazily resolves the typed operator handle for copy from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<copy::schema> create_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(copy::name, copy::overload_name)
      .typed<copy::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor copy::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    static auto handle = create_copy_typed_handle();
    return handle.call(self, src, non_blocking);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    static auto handle = create_copy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, src, non_blocking);
}

// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
// Lazily resolves the typed operator handle for copy_ from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<copy_::schema> create_copy__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(copy_::name, copy_::overload_name)
      .typed<copy_::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor & copy_::call(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    static auto handle = create_copy__typed_handle();
    return handle.call(self, src, non_blocking);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor & copy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    static auto handle = create_copy__typed_handle();
    return handle.redispatch(dispatchKeySet, self, src, non_blocking);
}

// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
// Lazily resolves the typed operator handle for _copy_from from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from::schema> create__copy_from_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_copy_from::name, _copy_from::overload_name)
      .typed<_copy_from::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor _copy_from::call(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
    static auto handle = create__copy_from_typed_handle();
    return handle.call(self, dst, non_blocking);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor _copy_from::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
    static auto handle = create__copy_from_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dst, non_blocking);
}

// aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
// Lazily resolves the typed operator handle for _copy_from_and_resize from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from_and_resize::schema> create__copy_from_and_resize_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_copy_from_and_resize::name, _copy_from_and_resize::overload_name)
      .typed<_copy_from_and_resize::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor _copy_from_and_resize::call(const at::Tensor & self, const at::Tensor & dst) {
    static auto handle = create__copy_from_and_resize_typed_handle();
    return handle.call(self, dst);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor _copy_from_and_resize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst) {
    static auto handle = create__copy_from_and_resize_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dst);
}

// aten::cos(Tensor self) -> Tensor
// Lazily resolves the typed operator handle for cos from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cos::schema> create_cos_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cos::name, cos::overload_name)
      .typed<cos::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor cos::call(const at::Tensor & self) {
    static auto handle = create_cos_typed_handle();
    return handle.call(self);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor cos::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_cos_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::cos_(Tensor(a!) self) -> Tensor(a!)
// Lazily resolves the typed operator handle for cos_ from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cos_::schema> create_cos__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cos_::name, cos_::overload_name)
      .typed<cos_::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor & cos_::call(at::Tensor & self) {
    static auto handle = create_cos__typed_handle();
    return handle.call(self);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor & cos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto handle = create_cos__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for cos.out from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cos_out::schema> create_cos_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cos_out::name, cos_out::overload_name)
      .typed<cos_out::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor & cos_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_cos_out_typed_handle();
    return handle.call(self, out);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor & cos_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_cos_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::cosh(Tensor self) -> Tensor
// Lazily resolves the typed operator handle for cosh from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cosh::schema> create_cosh_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cosh::name, cosh::overload_name)
      .typed<cosh::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor cosh::call(const at::Tensor & self) {
    static auto handle = create_cosh_typed_handle();
    return handle.call(self);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor cosh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_cosh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
// Lazily resolves the typed operator handle for cosh_ from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cosh_::schema> create_cosh__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cosh_::name, cosh_::overload_name)
      .typed<cosh_::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor & cosh_::call(at::Tensor & self) {
    static auto handle = create_cosh__typed_handle();
    return handle.call(self);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor & cosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto handle = create_cosh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed operator handle for cosh.out from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cosh_out::schema> create_cosh_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cosh_out::name, cosh_out::overload_name)
      .typed<cosh_out::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor & cosh_out::call(const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_cosh_out_typed_handle();
    return handle.call(self, out);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor & cosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_cosh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
// Lazily resolves the typed operator handle for cosine_embedding_loss from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cosine_embedding_loss::schema> create_cosine_embedding_loss_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cosine_embedding_loss::name, cosine_embedding_loss::overload_name)
      .typed<cosine_embedding_loss::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor cosine_embedding_loss::call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    static auto handle = create_cosine_embedding_loss_typed_handle();
    return handle.call(input1, input2, target, margin, reduction);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor cosine_embedding_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    static auto handle = create_cosine_embedding_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, input1, input2, target, margin, reduction);
}

// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
// Lazily resolves the typed operator handle for count_nonzero.dim_IntList from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero_dim_IntList::schema> create_count_nonzero_dim_IntList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(count_nonzero_dim_IntList::name, count_nonzero_dim_IntList::overload_name)
      .typed<count_nonzero_dim_IntList::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor count_nonzero_dim_IntList::call(const at::Tensor & self, at::IntArrayRef dim) {
    static auto handle = create_count_nonzero_dim_IntList_typed_handle();
    return handle.call(self, dim);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor count_nonzero_dim_IntList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
    static auto handle = create_count_nonzero_dim_IntList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
// Lazily resolves the typed operator handle for count_nonzero from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero::schema> create_count_nonzero_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(count_nonzero::name, count_nonzero::overload_name)
      .typed<count_nonzero::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor count_nonzero::call(const at::Tensor & self, ::std::optional<int64_t> dim) {
    static auto handle = create_count_nonzero_typed_handle();
    return handle.call(self, dim);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor count_nonzero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim) {
    static auto handle = create_count_nonzero_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
// Lazily resolves the typed operator handle for cov from the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<cov::schema> create_cov_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cov::name, cov::overload_name)
      .typed<cov::schema>();
}

// Typed call entry point: resolves the handle once (thread-safe static) and dispatches.
at::Tensor cov::call(const at::Tensor & self, int64_t correction, const ::std::optional<at::Tensor> & fweights, const ::std::optional<at::Tensor> & aweights) {
    static auto handle = create_cov_typed_handle();
    return handle.call(self, correction, fweights, aweights);
}

// Redispatch entry point: dispatches under the caller-supplied key set.
at::Tensor cov::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t correction, const ::std::optional<at::Tensor> & fweights, const ::std::optional<at::Tensor> & aweights) {
    static auto handle = create_cov_typed_handle();
    return handle.redispatch(dispatchKeySet, self, correction, fweights, aweights);
}

// aten::corrcoef(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<corrcoef::schema> create_corrcoef_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(corrcoef::name, corrcoef::overload_name)
      .typed<corrcoef::schema>();
}

// aten::corrcoef(Tensor self) -> Tensor
at::Tensor corrcoef::call(const at::Tensor & self) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_corrcoef_typed_handle();
    return op.call(self);
}

// aten::corrcoef(Tensor self) -> Tensor
at::Tensor corrcoef::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_corrcoef_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator::schema> create_cudnn_affine_grid_generator_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_affine_grid_generator::name, cudnn_affine_grid_generator::overload_name)
      .typed<cudnn_affine_grid_generator::schema>();
}

// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
at::Tensor cudnn_affine_grid_generator::call(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_affine_grid_generator_typed_handle();
    return op.call(theta, N, C, H, W);
}

// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
at::Tensor cudnn_affine_grid_generator::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_affine_grid_generator_typed_handle();
    return op.redispatch(dispatchKeySet, theta, N, C, H, W);
}

// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator_backward::schema> create_cudnn_affine_grid_generator_backward_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_affine_grid_generator_backward::name, cudnn_affine_grid_generator_backward::overload_name)
      .typed<cudnn_affine_grid_generator_backward::schema>();
}

// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
at::Tensor cudnn_affine_grid_generator_backward::call(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_affine_grid_generator_backward_typed_handle();
    return op.call(grad, N, C, H, W);
}

// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
at::Tensor cudnn_affine_grid_generator_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_affine_grid_generator_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, N, C, H, W);
}

// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm::schema> create_cudnn_batch_norm_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_batch_norm::name, cudnn_batch_norm::overload_name)
      .typed<cudnn_batch_norm::schema>();
}

// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_batch_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_batch_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm_backward::schema> create_cudnn_batch_norm_backward_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_batch_norm_backward::name, cudnn_batch_norm_backward::overload_name)
      .typed<cudnn_batch_norm_backward::schema>();
}

// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_batch_norm_backward_typed_handle();
    return op.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
}

// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_batch_norm_backward_typed_handle();
    return op.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
}

// aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution::schema> create_cudnn_convolution_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution::name, cudnn_convolution::overload_name)
      .typed<cudnn_convolution::schema>();
}

// aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_convolution_typed_handle();
    return op.call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

// aten::cudnn_convolution(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_out::schema> create_cudnn_convolution_out_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_out::name, cudnn_convolution_out::overload_name)
      .typed<cudnn_convolution_out::schema>();
}

// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_convolution_out_typed_handle();
    return op.call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

// aten::cudnn_convolution.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_transpose::schema> create_cudnn_convolution_transpose_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_transpose::name, cudnn_convolution_transpose::overload_name)
      .typed<cudnn_convolution_transpose::schema>();
}

// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution_transpose::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_convolution_transpose_typed_handle();
    return op.call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
at::Tensor cudnn_convolution_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_convolution_transpose_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
}

// aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution_transpose::schema> create__mps_convolution_transpose_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_convolution_transpose::name, _mps_convolution_transpose::overload_name)
      .typed<_mps_convolution_transpose::schema>();
}

// aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor _mps_convolution_transpose::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create__mps_convolution_transpose_typed_handle();
    return op.call(self, weight, padding, output_padding, stride, dilation, groups);
}

// aten::_mps_convolution_transpose(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor _mps_convolution_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create__mps_convolution_transpose_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups);
}

// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_transpose_backward::schema> create_mps_convolution_transpose_backward_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mps_convolution_transpose_backward::name, mps_convolution_transpose_backward::overload_name)
      .typed<mps_convolution_transpose_backward::schema>();
}

// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_mps_convolution_transpose_backward_typed_handle();
    return op.call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
}

// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_mps_convolution_transpose_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
}

// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_relu::schema> create_cudnn_convolution_relu_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_relu::name, cudnn_convolution_relu::overload_name)
      .typed<cudnn_convolution_relu::schema>();
}

// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor cudnn_convolution_relu::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_convolution_relu_typed_handle();
    return op.call(self, weight, bias, stride, padding, dilation, groups);
}

// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor cudnn_convolution_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_convolution_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups);
}

// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_add_relu::schema> create_cudnn_convolution_add_relu_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_convolution_add_relu::name, cudnn_convolution_add_relu::overload_name)
      .typed<cudnn_convolution_add_relu::schema>();
}

// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor cudnn_convolution_add_relu::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_convolution_add_relu_typed_handle();
    return op.call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor cudnn_convolution_add_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_convolution_add_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler::schema> create_cudnn_grid_sampler_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_grid_sampler::name, cudnn_grid_sampler::overload_name)
      .typed<cudnn_grid_sampler::schema>();
}

// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
at::Tensor cudnn_grid_sampler::call(const at::Tensor & self, const at::Tensor & grid) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_grid_sampler_typed_handle();
    return op.call(self, grid);
}

// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
at::Tensor cudnn_grid_sampler::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_grid_sampler_typed_handle();
    return op.redispatch(dispatchKeySet, self, grid);
}

// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler_backward::schema> create_cudnn_grid_sampler_backward_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cudnn_grid_sampler_backward::name, cudnn_grid_sampler_backward::overload_name)
      .typed<cudnn_grid_sampler_backward::schema>();
}

// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward::call(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cudnn_grid_sampler_backward_typed_handle();
    return op.call(self, grid, grad_output);
}

// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cudnn_grid_sampler_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grid, grad_output);
}

// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax::schema> create_cummax_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax::name, cummax::overload_name)
      .typed<cummax::schema>();
}

// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax::call(const at::Tensor & self, int64_t dim) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummax_typed_handle();
    return op.call(self, dim);
}

// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax_out::schema> create_cummax_out_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax_out::name, cummax_out::overload_name)
      .typed<cummax_out::schema>();
}

// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_out::call(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummax_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax_dimname::schema> create_cummax_dimname_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax_dimname::name, cummax_dimname::overload_name)
      .typed<cummax_dimname::schema>();
}

// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax_dimname::call(const at::Tensor & self, at::Dimname dim) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummax_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummax_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummax_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummax_dimname_out::schema> create_cummax_dimname_out_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummax_dimname_out::name, cummax_dimname_out::overload_name)
      .typed<cummax_dimname_out::schema>();
}

// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_dimname_out::call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummax_dimname_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummax_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummax_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_cummax_helper::schema> create__cummax_helper_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cummax_helper::name, _cummax_helper::overload_name)
      .typed<_cummax_helper::schema>();
}

// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummax_helper::call(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create__cummax_helper_typed_handle();
    return op.call(self, values, indices, dim);
}

// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummax_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create__cummax_helper_typed_handle();
    return op.redispatch(dispatchKeySet, self, values, indices, dim);
}

// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin::schema> create_cummin_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummin::name, cummin::overload_name)
      .typed<cummin::schema>();
}

// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin::call(const at::Tensor & self, int64_t dim) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummin_typed_handle();
    return op.call(self, dim);
}

// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummin_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin_out::schema> create_cummin_out_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummin_out::name, cummin_out::overload_name)
      .typed<cummin_out::schema>();
}

// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_out::call(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummin_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin_dimname::schema> create_cummin_dimname_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummin_dimname::name, cummin_dimname::overload_name)
      .typed<cummin_dimname::schema>();
}

// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin_dimname::call(const at::Tensor & self, at::Dimname dim) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummin_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> cummin_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummin_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<cummin_dimname_out::schema> create_cummin_dimname_out_typed_handle() {
  // Resolve the operator's registered schema once and return a statically-typed handle.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cummin_dimname_out::name, cummin_dimname_out::overload_name)
      .typed<cummin_dimname_out::schema>();
}

// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_dimname_out::call(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    // Lazily create the handle on first use (thread-safe C++11 static initialization).
    static auto op = create_cummin_dimname_out_typed_handle();
    return op.call(self, dim, values, indices);
}

// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> cummin_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
    // Dispatch via the cached handle using the caller-provided dispatchKeySet.
    static auto op = create_cummin_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, values, indices);
}

// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_cummin_helper::schema> create__cummin_helper_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cummin_helper::name, _cummin_helper::overload_name).typed<_cummin_helper::schema>();
}

// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummin_helper::call(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create__cummin_helper_typed_handle();
    return handle.call(self, values, indices, dim);
}

// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
void _cummin_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create__cummin_helper_typed_handle();
    return handle.redispatch(dispatchKeySet, self, values, indices, dim);
}

// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cummaxmin_backward::schema> create_cummaxmin_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cummaxmin_backward::name, cummaxmin_backward::overload_name).typed<cummaxmin_backward::schema>();
}

// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
at::Tensor cummaxmin_backward::call(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cummaxmin_backward_typed_handle();
    return handle.call(grad, input, indices, dim);
}

// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
at::Tensor cummaxmin_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cummaxmin_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, input, indices, dim);
}

// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumprod::schema> create_cumprod_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod::name, cumprod::overload_name).typed<cumprod::schema>();
}

// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumprod::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumprod::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_::schema> create_cumprod__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod_::name, cumprod_::overload_name).typed<cumprod_::schema>();
}

// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod_::call(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod__typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod__typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_out::schema> create_cumprod_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod_out::name, cumprod_out::overload_name).typed<cumprod_out::schema>();
}

// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_out::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod_out_typed_handle();
    return handle.call(self, dim, dtype, out);
}

// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_dimname::schema> create_cumprod_dimname_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod_dimname::name, cumprod_dimname::overload_name).typed<cumprod_dimname::schema>();
}

// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumprod_dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod_dimname_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumprod_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod_dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod__dimname::schema> create_cumprod__dimname_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod__dimname::name, cumprod__dimname::overload_name).typed<cumprod__dimname::schema>();
}

// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod__dimname::call(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod__dimname_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumprod__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod__dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_dimname_out::schema> create_cumprod_dimname_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod_dimname_out::name, cumprod_dimname_out::overload_name).typed<cumprod_dimname_out::schema>();
}

// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_dimname_out::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod_dimname_out_typed_handle();
    return handle.call(self, dim, dtype, out);
}

// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumprod_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod_dimname_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumprod_backward::schema> create_cumprod_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumprod_backward::name, cumprod_backward::overload_name).typed<cumprod_backward::schema>();
}

// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
at::Tensor cumprod_backward::call(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumprod_backward_typed_handle();
    return handle.call(grad, input, dim, output);
}

// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
at::Tensor cumprod_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumprod_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, input, dim, output);
}

// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumsum::schema> create_cumsum_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumsum::name, cumsum::overload_name).typed<cumsum::schema>();
}

// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumsum_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumsum_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_::schema> create_cumsum__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumsum_::name, cumsum_::overload_name).typed<cumsum_::schema>();
}

// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum_::call(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumsum__typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumsum__typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_out::schema> create_cumsum_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumsum_out::name, cumsum_out::overload_name).typed<cumsum_out::schema>();
}

// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_out::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumsum_out_typed_handle();
    return handle.call(self, dim, dtype, out);
}

// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumsum_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_dimname::schema> create_cumsum_dimname_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumsum_dimname::name, cumsum_dimname::overload_name).typed<cumsum_dimname::schema>();
}

// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum_dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumsum_dimname_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor cumsum_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumsum_dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum__dimname::schema> create_cumsum__dimname_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumsum__dimname::name, cumsum__dimname::overload_name).typed<cumsum__dimname::schema>();
}

// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum__dimname::call(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumsum__dimname_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
at::Tensor & cumsum__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumsum__dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cumsum_dimname_out::schema> create_cumsum_dimname_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumsum_dimname_out::name, cumsum_dimname_out::overload_name).typed<cumsum_dimname_out::schema>();
}

// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_dimname_out::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumsum_dimname_out_typed_handle();
    return handle.call(self, dim, dtype, out);
}

// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cumsum_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumsum_dimname_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumulative_trapezoid_x::schema> create_cumulative_trapezoid_x_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumulative_trapezoid_x::name, cumulative_trapezoid_x::overload_name).typed<cumulative_trapezoid_x::schema>();
}

// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_x::call(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumulative_trapezoid_x_typed_handle();
    return handle.call(y, x, dim);
}

// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_x::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumulative_trapezoid_x_typed_handle();
    return handle.redispatch(dispatchKeySet, y, x, dim);
}

// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cumulative_trapezoid_dx::schema> create_cumulative_trapezoid_dx_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cumulative_trapezoid_dx::name, cumulative_trapezoid_dx::overload_name).typed<cumulative_trapezoid_dx::schema>();
}

// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_dx::call(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_cumulative_trapezoid_dx_typed_handle();
    return handle.call(y, dx, dim);
}

// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
at::Tensor cumulative_trapezoid_dx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_cumulative_trapezoid_dx_typed_handle();
    return handle.redispatch(dispatchKeySet, y, dx, dim);
}

// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ctc_loss_IntList::schema> create_ctc_loss_IntList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ctc_loss_IntList::name, ctc_loss_IntList::overload_name).typed<ctc_loss_IntList::schema>();
}

// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_IntList::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_ctc_loss_IntList_typed_handle();
    return handle.call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_IntList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_ctc_loss_IntList_typed_handle();
    return handle.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ctc_loss_Tensor::schema> create_ctc_loss_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ctc_loss_Tensor::name, ctc_loss_Tensor::overload_name).typed<ctc_loss_Tensor::schema>();
}

// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_ctc_loss_Tensor_typed_handle();
    return handle.call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
at::Tensor ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_ctc_loss_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
}

// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss::schema> create__ctc_loss_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss::name, _ctc_loss::overload_name).typed<_ctc_loss::schema>();
}

// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create__ctc_loss_typed_handle();
    return handle.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create__ctc_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_Tensor::schema> create__ctc_loss_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss_Tensor::name, _ctc_loss_Tensor::overload_name).typed<_ctc_loss_Tensor::schema>();
}

// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create__ctc_loss_Tensor_typed_handle();
    return handle.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create__ctc_loss_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
}

// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_backward::schema> create__ctc_loss_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss_backward::name, _ctc_loss_backward::overload_name).typed<_ctc_loss_backward::schema>();
}

// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward::call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create__ctc_loss_backward_typed_handle();
    return handle.call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create__ctc_loss_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_backward_Tensor::schema> create__ctc_loss_backward_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss_backward_Tensor::name, _ctc_loss_backward_Tensor::overload_name).typed<_ctc_loss_backward_Tensor::schema>();
}

// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward_Tensor::call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create__ctc_loss_backward_Tensor_typed_handle();
    return handle.call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
at::Tensor _ctc_loss_backward_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create__ctc_loss_backward_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
}

// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diag_embed::schema> create_diag_embed_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diag_embed::name, diag_embed::overload_name).typed<diag_embed::schema>();
}

// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
at::Tensor diag_embed::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_diag_embed_typed_handle();
    return handle.call(self, offset, dim1, dim2);
}

// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
at::Tensor diag_embed::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_diag_embed_typed_handle();
    return handle.redispatch(dispatchKeySet, self, offset, dim1, dim2);
}

// aten::diagflat(Tensor self, int offset=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diagflat::schema> create_diagflat_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diagflat::name, diagflat::overload_name).typed<diagflat::schema>();
}

// aten::diagflat(Tensor self, int offset=0) -> Tensor
at::Tensor diagflat::call(const at::Tensor & self, int64_t offset) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_diagflat_typed_handle();
    return handle.call(self, offset);
}

// aten::diagflat(Tensor self, int offset=0) -> Tensor
at::Tensor diagflat::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_diagflat_typed_handle();
    return handle.redispatch(dispatchKeySet, self, offset);
}

// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal::schema> create_diagonal_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diagonal::name, diagonal::overload_name).typed<diagonal::schema>();
}

// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
at::Tensor diagonal::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    // Resolve the typed operator handle once; reuse the cached copy afterwards.
    static auto handle = create_diagonal_typed_handle();
    return handle.call(self, offset, dim1, dim2);
}

// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
at::Tensor diagonal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    // Re-enter the dispatcher with an explicit dispatch key set.
    static auto handle = create_diagonal_typed_handle();
    return handle.redispatch(dispatchKeySet, self, offset, dim1, dim2);
}

// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_diagonal::schema> create_linalg_diagonal_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_diagonal::name, linalg_diagonal::overload_name).typed<linalg_diagonal::schema>();
}

// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
at::Tensor linalg_diagonal::call(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_linalg_diagonal_typed_handle();
    return typed_handle.call(A, offset, dim1, dim2);
}

// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
at::Tensor linalg_diagonal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_linalg_diagonal_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, A, offset, dim1, dim2);
}

// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_Dimname::schema> create_diagonal_Dimname_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diagonal_Dimname::name, diagonal_Dimname::overload_name).typed<diagonal_Dimname::schema>();
}

// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
at::Tensor diagonal_Dimname::call(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_diagonal_Dimname_typed_handle();
    return typed_handle.call(self, outdim, dim1, dim2, offset);
}

// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
at::Tensor diagonal_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_diagonal_Dimname_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, outdim, dim1, dim2, offset);
}

// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_backward::schema> create_diagonal_backward_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diagonal_backward::name, diagonal_backward::overload_name).typed<diagonal_backward::schema>();
}

// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
at::Tensor diagonal_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_diagonal_backward_typed_handle();
    return typed_handle.call(grad_output, input_sizes, offset, dim1, dim2);
}

// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
at::Tensor diagonal_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_diagonal_backward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2);
}

// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fill_diagonal_::schema> create_fill_diagonal__typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fill_diagonal_::name, fill_diagonal_::overload_name).typed<fill_diagonal_::schema>();
}

// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
at::Tensor & fill_diagonal_::call(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_fill_diagonal__typed_handle();
    return typed_handle.call(self, fill_value, wrap);
}

// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
at::Tensor & fill_diagonal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_fill_diagonal__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, fill_value, wrap);
}

// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<diff::schema> create_diff_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diff::name, diff::overload_name).typed<diff::schema>();
}

// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
at::Tensor diff::call(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_diff_typed_handle();
    return typed_handle.call(self, n, dim, prepend, append);
}

// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
at::Tensor diff::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_diff_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, n, dim, prepend, append);
}

// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diff_out::schema> create_diff_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diff_out::name, diff_out::overload_name).typed<diff_out::schema>();
}

// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diff_out::call(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append, at::Tensor & out) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_diff_out_typed_handle();
    return typed_handle.call(self, n, dim, prepend, append, out);
}

// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diff_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append, at::Tensor & out) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_diff_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, n, dim, prepend, append, out);
}

// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalarint::schema> create_gradient_scalarint_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_scalarint::name, gradient_scalarint::overload_name).typed<gradient_scalarint::schema>();
}

// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarint::call(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_scalarint_typed_handle();
    return typed_handle.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_scalarint_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalararray::schema> create_gradient_scalararray_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_scalararray::name, gradient_scalararray::overload_name).typed<gradient_scalararray::schema>();
}

// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalararray::call(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_scalararray_typed_handle();
    return typed_handle.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalararray::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_scalararray_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_array::schema> create_gradient_array_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_array::name, gradient_array::overload_name).typed<gradient_array::schema>();
}

// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_array::call(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_array_typed_handle();
    return typed_handle.call(self, dim, edge_order);
}

// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_array_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, edge_order);
}

// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalarrayint::schema> create_gradient_scalarrayint_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_scalarrayint::name, gradient_scalarrayint::overload_name).typed<gradient_scalarrayint::schema>();
}

// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayint::call(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_scalarrayint_typed_handle();
    return typed_handle.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_scalarrayint_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_scalarrayarray::schema> create_gradient_scalarrayarray_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_scalarrayarray::name, gradient_scalarrayarray::overload_name).typed<gradient_scalarrayarray::schema>();
}

// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayarray::call(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_scalarrayarray_typed_handle();
    return typed_handle.call(self, spacing, dim, edge_order);
}

// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_scalarrayarray::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_scalarrayarray_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_tensorarrayint::schema> create_gradient_tensorarrayint_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_tensorarrayint::name, gradient_tensorarrayint::overload_name).typed<gradient_tensorarrayint::schema>();
}

// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarrayint::call(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_tensorarrayint_typed_handle();
    return typed_handle.call(self, spacing, dim, edge_order);
}

// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarrayint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_tensorarrayint_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<gradient_tensorarray::schema> create_gradient_tensorarray_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gradient_tensorarray::name, gradient_tensorarray::overload_name).typed<gradient_tensorarray::schema>();
}

// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarray::call(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_gradient_tensorarray_typed_handle();
    return typed_handle.call(self, spacing, dim, edge_order);
}

// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
::std::vector<at::Tensor> gradient_tensorarray::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_gradient_tensorarray_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, spacing, dim, edge_order);
}

// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<div_Tensor::schema> create_div_Tensor_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_Tensor::name, div_Tensor::overload_name).typed<div_Tensor::schema>();
}

// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor div_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div_Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor div_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div__Tensor::schema> create_div__Tensor_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div__Tensor::name, div__Tensor::overload_name).typed<div__Tensor::schema>();
}

// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & div__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div__Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & div__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div__Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_out::schema> create_div_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_out::name, div_out::overload_name).typed<div_out::schema>();
}

// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<div_Tensor_mode::schema> create_div_Tensor_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_Tensor_mode::name, div_Tensor_mode::overload_name).typed<div_Tensor_mode::schema>();
}

// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor div_Tensor_mode::call(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div_Tensor_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode);
}

// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor div_Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div_Tensor_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div__Tensor_mode::schema> create_div__Tensor_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div__Tensor_mode::name, div__Tensor_mode::overload_name).typed<div__Tensor_mode::schema>();
}

// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & div__Tensor_mode::call(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div__Tensor_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode);
}

// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & div__Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div__Tensor_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_out_mode::schema> create_div_out_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_out_mode::name, div_out_mode::overload_name).typed<div_out_mode::schema>();
}

// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_out_mode::call(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div_out_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode, out);
}

// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_out_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div_out_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode, out);
}

// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar::schema> create_div_Scalar_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_Scalar::name, div_Scalar::overload_name).typed<div_Scalar::schema>();
}

// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor div_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div_Scalar_typed_handle();
    return typed_handle.call(self, other);
}

// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor div_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div__Scalar::schema> create_div__Scalar_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div__Scalar::name, div__Scalar::overload_name).typed<div__Scalar::schema>();
}

// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & div__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div__Scalar_typed_handle();
    return typed_handle.call(self, other);
}

// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & div__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar_mode::schema> create_div_Scalar_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_Scalar_mode::name, div_Scalar_mode::overload_name).typed<div_Scalar_mode::schema>();
}

// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
at::Tensor div_Scalar_mode::call(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div_Scalar_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode);
}

// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
at::Tensor div_Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div_Scalar_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div__Scalar_mode::schema> create_div__Scalar_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div__Scalar_mode::name, div__Scalar_mode::overload_name).typed<div__Scalar_mode::schema>();
}

// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & div__Scalar_mode::call(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_div__Scalar_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode);
}

// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & div__Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_div__Scalar_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Tensor::schema> create_divide_Tensor_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide_Tensor::name, divide_Tensor::overload_name).typed<divide_Tensor::schema>();
}

// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor divide_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide_Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor divide_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Tensor::schema> create_divide__Tensor_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide__Tensor::name, divide__Tensor::overload_name).typed<divide__Tensor::schema>();
}

// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & divide__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide__Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & divide__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide__Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide_out::schema> create_divide_out_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide_out::name, divide_out::overload_name).typed<divide_out::schema>();
}

// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Scalar::schema> create_divide_Scalar_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide_Scalar::name, divide_Scalar::overload_name).typed<divide_Scalar::schema>();
}

// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor divide_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide_Scalar_typed_handle();
    return typed_handle.call(self, other);
}

// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor divide_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Scalar::schema> create_divide__Scalar_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide__Scalar::name, divide__Scalar::overload_name).typed<divide__Scalar::schema>();
}

// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & divide__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide__Scalar_typed_handle();
    return typed_handle.call(self, other);
}

// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & divide__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Tensor_mode::schema> create_divide_Tensor_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide_Tensor_mode::name, divide_Tensor_mode::overload_name).typed<divide_Tensor_mode::schema>();
}

// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Tensor_mode::call(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide_Tensor_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode);
}

// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide_Tensor_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Tensor_mode::schema> create_divide__Tensor_mode_typed_handle() {
  // Resolve the operator schema in the global dispatcher and return a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(divide__Tensor_mode::name, divide__Tensor_mode::overload_name).typed<divide__Tensor_mode::schema>();
}

// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Tensor_mode::call(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Handle is resolved once on first use and cached for later calls.
    static auto typed_handle = create_divide__Tensor_mode_typed_handle();
    return typed_handle.call(self, other, rounding_mode);
}

// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Tensor_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
    // Forward through the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_divide__Tensor_mode_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide_out_mode::schema> create_divide_out_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_out_mode::name, divide_out_mode::overload_name)
      .typed<divide_out_mode::schema>();
}

// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out_mode::call(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    
    static auto op = create_divide_out_mode_typed_handle();
    return op.call(self, other, rounding_mode, out);
}

// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & divide_out_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    
    static auto op = create_divide_out_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode, out);
}

// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<divide_Scalar_mode::schema> create_divide_Scalar_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide_Scalar_mode::name, divide_Scalar_mode::overload_name)
      .typed<divide_Scalar_mode::schema>();
}

// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Scalar_mode::call(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    
    static auto op = create_divide_Scalar_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
at::Tensor divide_Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    
    static auto op = create_divide_Scalar_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<divide__Scalar_mode::schema> create_divide__Scalar_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(divide__Scalar_mode::name, divide__Scalar_mode::overload_name)
      .typed<divide__Scalar_mode::schema>();
}

// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Scalar_mode::call(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    
    static auto op = create_divide__Scalar_mode_typed_handle();
    return op.call(self, other, rounding_mode);
}

// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
at::Tensor & divide__Scalar_mode::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
    
    static auto op = create_divide__Scalar_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rounding_mode);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for the aten::true_divide.* overload family.
// Each operator gets: a C10_NOINLINE creator that resolves a typed handle
// from the global c10::Dispatcher (throws if the schema is unregistered),
// a call() that caches that handle in a function-local static and fully
// dispatches, and a redispatch() that forwards an explicit DispatchKeySet.
// Operators whose schema annotates `Tensor(a!)` mutate and return `self`/`out`.
// Do not hand-edit: regenerated by torchgen/gen.py.
// ---------------------------------------------------------------------------
// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<true_divide_Tensor::schema> create_true_divide_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide_Tensor::name, true_divide_Tensor::overload_name)
      .typed<true_divide_Tensor::schema>();
}

// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor true_divide_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_true_divide_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor true_divide_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_true_divide_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<true_divide__Tensor::schema> create_true_divide__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide__Tensor::name, true_divide__Tensor::overload_name)
      .typed<true_divide__Tensor::schema>();
}

// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & true_divide__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_true_divide__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & true_divide__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_true_divide__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<true_divide_out::schema> create_true_divide_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide_out::name, true_divide_out::overload_name)
      .typed<true_divide_out::schema>();
}

// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & true_divide_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_true_divide_out_typed_handle();
    return op.call(self, other, out);
}

// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & true_divide_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_true_divide_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<true_divide_Scalar::schema> create_true_divide_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide_Scalar::name, true_divide_Scalar::overload_name)
      .typed<true_divide_Scalar::schema>();
}

// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor true_divide_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_true_divide_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor true_divide_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_true_divide_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<true_divide__Scalar::schema> create_true_divide__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(true_divide__Scalar::name, true_divide__Scalar::overload_name)
      .typed<true_divide__Scalar::schema>();
}

// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & true_divide__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_true_divide__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & true_divide__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_true_divide__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for aten::dot / aten::vdot and their .out
// variants. Same three-function pattern per operator: a C10_NOINLINE typed
// handle creator (Dispatcher::findSchemaOrThrow + .typed<>), a call() that
// caches the handle in a function-local static, and a redispatch() that
// forwards an explicit DispatchKeySet. The .out variants write into `out`
// and return it. Do not hand-edit: regenerated by torchgen/gen.py.
// ---------------------------------------------------------------------------
// aten::dot(Tensor self, Tensor tensor) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dot::schema> create_dot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dot::name, dot::overload_name)
      .typed<dot::schema>();
}

// aten::dot(Tensor self, Tensor tensor) -> Tensor
at::Tensor dot::call(const at::Tensor & self, const at::Tensor & tensor) {
    
    static auto op = create_dot_typed_handle();
    return op.call(self, tensor);
}

// aten::dot(Tensor self, Tensor tensor) -> Tensor
at::Tensor dot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) {
    
    static auto op = create_dot_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor);
}

// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dot_out::schema> create_dot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dot_out::name, dot_out::overload_name)
      .typed<dot_out::schema>();
}

// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dot_out::call(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
    
    static auto op = create_dot_out_typed_handle();
    return op.call(self, tensor, out);
}

// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
    
    static auto op = create_dot_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor, out);
}

// aten::vdot(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<vdot::schema> create_vdot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vdot::name, vdot::overload_name)
      .typed<vdot::schema>();
}

// aten::vdot(Tensor self, Tensor other) -> Tensor
at::Tensor vdot::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_vdot_typed_handle();
    return op.call(self, other);
}

// aten::vdot(Tensor self, Tensor other) -> Tensor
at::Tensor vdot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_vdot_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<vdot_out::schema> create_vdot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vdot_out::name, vdot_out::overload_name)
      .typed<vdot_out::schema>();
}

// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vdot_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_vdot_out_typed_handle();
    return op.call(self, other, out);
}

// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vdot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_vdot_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for aten::einsum. The creator resolves a typed
// handle from the global c10::Dispatcher (throws if the schema is missing);
// call() caches it in a function-local static and dispatches; redispatch()
// forwards an explicit DispatchKeySet. Do not hand-edit (torchgen output).
// ---------------------------------------------------------------------------
// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<einsum::schema> create_einsum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(einsum::name, einsum::overload_name)
      .typed<einsum::schema>();
}

// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
at::Tensor einsum::call(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
    
    static auto op = create_einsum_typed_handle();
    return op.call(equation, tensors, path);
}

// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
at::Tensor einsum::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
    
    static auto op = create_einsum_typed_handle();
    return op.redispatch(dispatchKeySet, equation, tensors, path);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for the aten::embedding* operators
// (forward, backward, dense/sparse backward, in-place renorm). Each operator
// follows the standard torchgen pattern: C10_NOINLINE typed-handle creator,
// call() with a function-local static handle cache, and redispatch() that
// forwards an explicit DispatchKeySet. Note that some overloads take SymInt
// (symbolic int) sizes while embedding_sparse_backward uses plain int64_t,
// exactly as declared in their schemas. Do not hand-edit (torchgen output).
// ---------------------------------------------------------------------------
// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding::schema> create_embedding_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding::name, embedding::overload_name)
      .typed<embedding::schema>();
}

// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
at::Tensor embedding::call(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    
    static auto op = create_embedding_typed_handle();
    return op.call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
}

// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
at::Tensor embedding::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    
    static auto op = create_embedding_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse);
}

// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding_backward::schema> create_embedding_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_backward::name, embedding_backward::overload_name)
      .typed<embedding_backward::schema>();
}

// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
at::Tensor embedding_backward::call(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    
    static auto op = create_embedding_backward_typed_handle();
    return op.call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}

// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
at::Tensor embedding_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
    
    static auto op = create_embedding_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
}

// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding_dense_backward::schema> create_embedding_dense_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_dense_backward::name, embedding_dense_backward::overload_name)
      .typed<embedding_dense_backward::schema>();
}

// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_dense_backward::call(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
    
    static auto op = create_embedding_dense_backward_typed_handle();
    return op.call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_dense_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
    
    static auto op = create_embedding_dense_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_renorm_::schema> create_embedding_renorm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_renorm_::name, embedding_renorm_::overload_name)
      .typed<embedding_renorm_::schema>();
}

// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
at::Tensor & embedding_renorm_::call(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    
    static auto op = create_embedding_renorm__typed_handle();
    return op.call(self, indices, max_norm, norm_type);
}

// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
at::Tensor & embedding_renorm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    
    static auto op = create_embedding_renorm__typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, max_norm, norm_type);
}

// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<embedding_sparse_backward::schema> create_embedding_sparse_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_sparse_backward::name, embedding_sparse_backward::overload_name)
      .typed<embedding_sparse_backward::schema>();
}

// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_sparse_backward::call(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    
    static auto op = create_embedding_sparse_backward_typed_handle();
    return op.call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
at::Tensor embedding_sparse_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
    
    static auto op = create_embedding_sparse_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for aten::_embedding_bag_forward_only (4-tensor
// tuple result) and aten::_rowwise_prune (2-tensor tuple result). Standard
// torchgen pattern: C10_NOINLINE typed-handle creator, static-cached call(),
// and redispatch() with an explicit DispatchKeySet. Do not hand-edit.
// ---------------------------------------------------------------------------
// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_forward_only::schema> create__embedding_bag_forward_only_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_forward_only::name, _embedding_bag_forward_only::overload_name)
      .typed<_embedding_bag_forward_only::schema>();
}

// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    
    static auto op = create__embedding_bag_forward_only_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    
    static auto op = create__embedding_bag_forward_only_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_rowwise_prune::schema> create__rowwise_prune_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_rowwise_prune::name, _rowwise_prune::overload_name)
      .typed<_rowwise_prune::schema>();
}

// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune::call(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
    
    static auto op = create__rowwise_prune_typed_handle();
    return op.call(weight, mask, compressed_indices_dtype);
}

// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
    
    static auto op = create__rowwise_prune_typed_handle();
    return op.redispatch(dispatchKeySet, weight, mask, compressed_indices_dtype);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for aten::row_stack and aten::row_stack.out.
// Standard torchgen pattern: C10_NOINLINE typed-handle creator, call() with a
// function-local static handle cache, redispatch() with an explicit
// DispatchKeySet. The .out variant writes into `out` and returns it.
// Do not hand-edit (torchgen output).
// ---------------------------------------------------------------------------
// aten::row_stack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<row_stack::schema> create_row_stack_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_stack::name, row_stack::overload_name)
      .typed<row_stack::schema>();
}

// aten::row_stack(Tensor[] tensors) -> Tensor
at::Tensor row_stack::call(at::TensorList tensors) {
    
    static auto op = create_row_stack_typed_handle();
    return op.call(tensors);
}

// aten::row_stack(Tensor[] tensors) -> Tensor
at::Tensor row_stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    
    static auto op = create_row_stack_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<row_stack_out::schema> create_row_stack_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_stack_out::name, row_stack_out::overload_name)
      .typed<row_stack_out::schema>();
}

// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_stack_out::call(at::TensorList tensors, at::Tensor & out) {
    
    static auto op = create_row_stack_out_typed_handle();
    return op.call(tensors, out);
}

// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    
    static auto op = create_row_stack_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for the embedding_bag operators: the public
// aten::embedding_bag, its .padding_idx overload (optional<int64_t>
// padding_idx), and the internal aten::_embedding_bag (plain int64_t
// padding_idx defaulting to -1 in the schema). All return a 4-tensor tuple.
// Standard torchgen pattern: C10_NOINLINE typed-handle creator, call() with a
// static handle cache, redispatch() with an explicit DispatchKeySet.
// Do not hand-edit (torchgen output).
// ---------------------------------------------------------------------------
// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_bag::schema> create_embedding_bag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_bag::name, embedding_bag::overload_name)
      .typed<embedding_bag::schema>();
}

// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
    
    static auto op = create_embedding_bag_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}

// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
    
    static auto op = create_embedding_bag_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
}

// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<embedding_bag_padding_idx::schema> create_embedding_bag_padding_idx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_bag_padding_idx::name, embedding_bag_padding_idx::overload_name)
      .typed<embedding_bag_padding_idx::schema>();
}

// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, ::std::optional<int64_t> padding_idx) {
    
    static auto op = create_embedding_bag_padding_idx_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, ::std::optional<int64_t> padding_idx) {
    
    static auto op = create_embedding_bag_padding_idx_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag::schema> create__embedding_bag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag::name, _embedding_bag::overload_name)
      .typed<_embedding_bag::schema>();
}

// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    
    static auto op = create__embedding_bag_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
    
    static auto op = create__embedding_bag_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
}

// ---------------------------------------------------------------------------
// @generated dispatcher stubs for aten::_embedding_bag_backward (gradient of
// the internal _embedding_bag; num_weights is a SymInt per the schema).
// Standard torchgen pattern: C10_NOINLINE typed-handle creator, call() with a
// function-local static handle cache, redispatch() with an explicit
// DispatchKeySet. Do not hand-edit (torchgen output).
// ---------------------------------------------------------------------------
// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_backward::schema> create__embedding_bag_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_backward::name, _embedding_bag_backward::overload_name)
      .typed<_embedding_bag_backward::schema>();
}

// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_backward::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    
    static auto op = create__embedding_bag_backward_typed_handle();
    return op.call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    
    static auto op = create__embedding_bag_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_sparse_backward::schema> create__embedding_bag_sparse_backward_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_sparse_backward::name, _embedding_bag_sparse_backward::overload_name)
      .typed<_embedding_bag_sparse_backward::schema>();
}

// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_sparse_backward::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create__embedding_bag_sparse_backward_typed_handle();
    return op.call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_sparse_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create__embedding_bag_sparse_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_dense_backward::schema> create__embedding_bag_dense_backward_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_dense_backward::name, _embedding_bag_dense_backward::overload_name)
      .typed<_embedding_bag_dense_backward::schema>();
}

// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_dense_backward::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create__embedding_bag_dense_backward_typed_handle();
    return op.call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_dense_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create__embedding_bag_dense_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
}

// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_per_sample_weights_backward::schema> create__embedding_bag_per_sample_weights_backward_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_per_sample_weights_backward::name, _embedding_bag_per_sample_weights_backward::overload_name)
      .typed<_embedding_bag_per_sample_weights_backward::schema>();
}

// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_per_sample_weights_backward::call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create__embedding_bag_per_sample_weights_backward_typed_handle();
    return op.call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
}

// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
at::Tensor _embedding_bag_per_sample_weights_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create__embedding_bag_per_sample_weights_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx);
}

// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_names::schema> create_empty_names_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_names::name, empty_names::overload_name)
      .typed<empty_names::schema>();
}

// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_names::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_empty_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_memory_format::schema> create_empty_memory_format_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_memory_format::name, empty_memory_format::overload_name)
      .typed<empty_memory_format::schema>();
}

// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_memory_format::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_empty_memory_format_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_memory_format::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_memory_format_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_permuted::schema> create_empty_permuted_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_permuted::name, empty_permuted::overload_name)
      .typed<empty_permuted::schema>();
}

// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor empty_permuted::call(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_empty_permuted_typed_handle();
    return op.call(size, physical_layout, dtype, layout, device, pin_memory);
}

// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor empty_permuted::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_permuted_typed_handle();
    return op.redispatch(dispatchKeySet, size, physical_layout, dtype, layout, device, pin_memory);
}

// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_empty::schema> create_new_empty_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty::name, new_empty::overload_name)
      .typed<new_empty::schema>();
}

// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_new_empty_typed_handle();
    return op.call(self, size, dtype, layout, device, pin_memory);
}

// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_new_empty_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory);
}

// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_empty_strided::schema> create_new_empty_strided_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty_strided::name, new_empty_strided::overload_name)
      .typed<new_empty_strided::schema>();
}

// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty_strided::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_new_empty_strided_typed_handle();
    return op.call(self, size, stride, dtype, layout, device, pin_memory);
}

// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_empty_strided::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_new_empty_strided_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, dtype, layout, device, pin_memory);
}

// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_full::schema> create_new_full_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_full::name, new_full::overload_name)
      .typed<new_full::schema>();
}

// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_full::call(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_new_full_typed_handle();
    return op.call(self, size, fill_value, dtype, layout, device, pin_memory);
}

// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_full::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_new_full_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value, dtype, layout, device, pin_memory);
}

// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_zeros::schema> create_new_zeros_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_zeros::name, new_zeros::overload_name)
      .typed<new_zeros::schema>();
}

// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_zeros::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_new_zeros_typed_handle();
    return op.call(self, size, dtype, layout, device, pin_memory);
}

// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_zeros::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_new_zeros_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory);
}

// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<new_ones::schema> create_new_ones_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_ones::name, new_ones::overload_name)
      .typed<new_ones::schema>();
}

// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_ones::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_new_ones_typed_handle();
    return op.call(self, size, dtype, layout, device, pin_memory);
}

// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor new_ones::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_new_ones_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory);
}

// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_empty_affine_quantized::schema> create__empty_affine_quantized_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_affine_quantized::name, _empty_affine_quantized::overload_name)
      .typed<_empty_affine_quantized::schema>();
}

// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_affine_quantized::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create__empty_affine_quantized_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}

// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_affine_quantized::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create__empty_affine_quantized_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
}

// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_empty_per_channel_affine_quantized::schema> create__empty_per_channel_affine_quantized_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_per_channel_affine_quantized::name, _empty_per_channel_affine_quantized::overload_name)
      .typed<_empty_per_channel_affine_quantized::schema>();
}

// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_per_channel_affine_quantized::call(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create__empty_per_channel_affine_quantized_typed_handle();
    return op.call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}

// aten::_empty_per_channel_affine_quantized(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
at::Tensor _empty_per_channel_affine_quantized::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create__empty_per_channel_affine_quantized_typed_handle();
    return op.redispatch(dispatchKeySet, size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
}

// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<resize_::schema> create_resize__typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_::name, resize_::overload_name)
      .typed<resize_::schema>();
}

// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
const at::Tensor & resize_::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached. Returns a
    // reference per the (a!) in-place schema annotation above.
    static auto op = create_resize__typed_handle();
    return op.call(self, size, memory_format);
}

// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
const at::Tensor & resize_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_resize__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, memory_format);
}

// aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_resize_output_::schema> create__resize_output__typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_resize_output_::name, _resize_output_::overload_name)
      .typed<_resize_output_::schema>();
}

// aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)
const at::Tensor & _resize_output_::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
    
    // Handle is resolved once (thread-safe static init) and cached. Returns a
    // reference per the (a!) in-place schema annotation above.
    static auto op = create__resize_output__typed_handle();
    return op.call(self, size, device);
}

// aten::_resize_output_(Tensor(a!) self, SymInt[] size, Device device) -> Tensor(a!)
const at::Tensor & _resize_output_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create__resize_output__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, device);
}

// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_quantized::schema> create_empty_quantized_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_quantized::name, empty_quantized::overload_name)
      .typed<empty_quantized::schema>();
}

// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_quantized::call(at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_empty_quantized_typed_handle();
    return op.call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_quantized::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_quantized_typed_handle();
    return op.redispatch(dispatchKeySet, size, qtensor, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<empty_out::schema> create_empty_out_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_out::name, empty_out::overload_name)
      .typed<empty_out::schema>();
}

// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_out::call(c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    
    // Handle is resolved once (thread-safe static init) and cached. The `out`
    // argument is written through per the (a!) schema annotation above.
    static auto op = create_empty_out_typed_handle();
    return op.call(size, memory_format, out);
}

// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, memory_format, out);
}

// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_like::schema> create_empty_like_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_like::name, empty_like::overload_name)
      .typed<empty_like::schema>();
}

// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_empty_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor empty_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<empty_strided::schema> create_empty_strided_typed_handle() {
  // One-time schema lookup in the global dispatcher; C10_NOINLINE keeps this
  // cold lookup code out of the call()/redispatch() call sites.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_strided::name, empty_strided::overload_name)
      .typed<empty_strided::schema>();
}

// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor empty_strided::call(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached.
    static auto op = create_empty_strided_typed_handle();
    return op.call(size, stride, dtype, layout, device, pin_memory);
}

// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor empty_strided::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Explicit DispatchKeySet variant: continues dispatch below the current key.
    static auto op = create_empty_strided_typed_handle();
    return op.redispatch(dispatchKeySet, size, stride, dtype, layout, device, pin_memory);
}

// --- Dispatcher glue for aten::erf / erf_ (in-place) / erf.out ---
// Each op: one cold schema-lookup factory + thin call/redispatch wrappers that
// cache the typed handle in a function-local static. @generated by torchgen.
// aten::erf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<erf::schema> create_erf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erf::name, erf::overload_name)
      .typed<erf::schema>();
}

// aten::erf(Tensor self) -> Tensor
at::Tensor erf::call(const at::Tensor & self) {
    
    static auto op = create_erf_typed_handle();
    return op.call(self);
}

// aten::erf(Tensor self) -> Tensor
at::Tensor erf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_erf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::erf_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erf_::schema> create_erf__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erf_::name, erf_::overload_name)
      .typed<erf_::schema>();
}

// aten::erf_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erf_::call(at::Tensor & self) {
    
    static auto op = create_erf__typed_handle();
    return op.call(self);
}

// aten::erf_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erf_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_erf__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erf_out::schema> create_erf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erf_out::name, erf_out::overload_name)
      .typed<erf_out::schema>();
}

// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erf_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_erf_out_typed_handle();
    return op.call(self, out);
}

// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_erf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- Dispatcher glue for aten::erfc / erfc_ / erfc.out ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::erfc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<erfc::schema> create_erfc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfc::name, erfc::overload_name)
      .typed<erfc::schema>();
}

// aten::erfc(Tensor self) -> Tensor
at::Tensor erfc::call(const at::Tensor & self) {
    
    static auto op = create_erfc_typed_handle();
    return op.call(self);
}

// aten::erfc(Tensor self) -> Tensor
at::Tensor erfc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_erfc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erfc_::schema> create_erfc__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfc_::name, erfc_::overload_name)
      .typed<erfc_::schema>();
}

// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erfc_::call(at::Tensor & self) {
    
    static auto op = create_erfc__typed_handle();
    return op.call(self);
}

// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erfc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_erfc__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erfc_out::schema> create_erfc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfc_out::name, erfc_out::overload_name)
      .typed<erfc_out::schema>();
}

// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erfc_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_erfc_out_typed_handle();
    return op.call(self, out);
}

// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erfc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_erfc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- Dispatcher glue for aten::exp / exp_ / exp.out ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::exp(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<exp::schema> create_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp::name, exp::overload_name)
      .typed<exp::schema>();
}

// aten::exp(Tensor self) -> Tensor
at::Tensor exp::call(const at::Tensor & self) {
    
    static auto op = create_exp_typed_handle();
    return op.call(self);
}

// aten::exp(Tensor self) -> Tensor
at::Tensor exp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<exp_::schema> create_exp__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp_::name, exp_::overload_name)
      .typed<exp_::schema>();
}

// aten::exp_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp_::call(at::Tensor & self) {
    
    static auto op = create_exp__typed_handle();
    return op.call(self);
}

// aten::exp_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_exp__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<exp_out::schema> create_exp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp_out::name, exp_out::overload_name)
      .typed<exp_out::schema>();
}

// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_exp_out_typed_handle();
    return op.call(self, out);
}

// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_exp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- Dispatcher glue for aten::exp2 / exp2_ / exp2.out ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::exp2(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<exp2::schema> create_exp2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp2::name, exp2::overload_name)
      .typed<exp2::schema>();
}

// aten::exp2(Tensor self) -> Tensor
at::Tensor exp2::call(const at::Tensor & self) {
    
    static auto op = create_exp2_typed_handle();
    return op.call(self);
}

// aten::exp2(Tensor self) -> Tensor
at::Tensor exp2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_exp2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<exp2_::schema> create_exp2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp2_::name, exp2_::overload_name)
      .typed<exp2_::schema>();
}

// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp2_::call(at::Tensor & self) {
    
    static auto op = create_exp2__typed_handle();
    return op.call(self);
}

// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & exp2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_exp2__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<exp2_out::schema> create_exp2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exp2_out::name, exp2_out::overload_name)
      .typed<exp2_out::schema>();
}

// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp2_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_exp2_out_typed_handle();
    return op.call(self, out);
}

// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exp2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_exp2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- Dispatcher glue for aten::expm1 / expm1_ / expm1.out ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::expm1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<expm1::schema> create_expm1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expm1::name, expm1::overload_name)
      .typed<expm1::schema>();
}

// aten::expm1(Tensor self) -> Tensor
at::Tensor expm1::call(const at::Tensor & self) {
    
    static auto op = create_expm1_typed_handle();
    return op.call(self);
}

// aten::expm1(Tensor self) -> Tensor
at::Tensor expm1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_expm1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<expm1_::schema> create_expm1__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expm1_::name, expm1_::overload_name)
      .typed<expm1_::schema>();
}

// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & expm1_::call(at::Tensor & self) {
    
    static auto op = create_expm1__typed_handle();
    return op.call(self);
}

// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & expm1_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_expm1__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<expm1_out::schema> create_expm1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expm1_out::name, expm1_out::overload_name)
      .typed<expm1_out::schema>();
}

// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expm1_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_expm1_out_typed_handle();
    return op.call(self, out);
}

// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expm1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_expm1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- Dispatcher glue for aten::expand and aten::expand_as (view ops) ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<expand::schema> create_expand_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand::name, expand::overload_name)
      .typed<expand::schema>();
}

// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
at::Tensor expand::call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    
    static auto op = create_expand_typed_handle();
    return op.call(self, size, implicit);
}

// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
at::Tensor expand::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    
    static auto op = create_expand_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, implicit);
}

// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<expand_as::schema> create_expand_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand_as::name, expand_as::overload_name)
      .typed<expand_as::schema>();
}

// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor expand_as::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_expand_as_typed_handle();
    return op.call(self, other);
}

// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor expand_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_expand_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// --- Dispatcher glue for aten::eye (n / m / out / m_out overloads) ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<eye::schema> create_eye_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye::name, eye::overload_name)
      .typed<eye::schema>();
}

// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye::call(c10::SymInt n, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_eye_typed_handle();
    return op.call(n, dtype, layout, device, pin_memory);
}

// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_eye_typed_handle();
    return op.redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory);
}

// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<eye_m::schema> create_eye_m_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye_m::name, eye_m::overload_name)
      .typed<eye_m::schema>();
}

// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye_m::call(c10::SymInt n, c10::SymInt m, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_eye_m_typed_handle();
    return op.call(n, m, dtype, layout, device, pin_memory);
}

// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor eye_m::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_eye_m_typed_handle();
    return op.redispatch(dispatchKeySet, n, m, dtype, layout, device, pin_memory);
}

// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eye_out::schema> create_eye_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye_out::name, eye_out::overload_name)
      .typed<eye_out::schema>();
}

// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_out::call(c10::SymInt n, at::Tensor & out) {
    
    static auto op = create_eye_out_typed_handle();
    return op.call(n, out);
}

// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) {
    
    static auto op = create_eye_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, out);
}

// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eye_m_out::schema> create_eye_m_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eye_m_out::name, eye_m_out::overload_name)
      .typed<eye_m_out::schema>();
}

// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_m_out::call(c10::SymInt n, c10::SymInt m, at::Tensor & out) {
    
    static auto op = create_eye_m_out_typed_handle();
    return op.call(n, m, out);
}

// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eye_m_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::Tensor & out) {
    
    static auto op = create_eye_m_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, m, out);
}

// --- Dispatcher glue for the four aten::flatten overloads ---
// (using_ints / named_out_dim / using_names / DimnameList). Same generated
// pattern: cold handle factory + static-cached call/redispatch.
// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_using_ints::schema> create_flatten_using_ints_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_using_ints::name, flatten_using_ints::overload_name)
      .typed<flatten_using_ints::schema>();
}

// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
at::Tensor flatten_using_ints::call(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
    
    static auto op = create_flatten_using_ints_typed_handle();
    return op.call(self, start_dim, end_dim);
}

// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
at::Tensor flatten_using_ints::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
    
    static auto op = create_flatten_using_ints_typed_handle();
    return op.redispatch(dispatchKeySet, self, start_dim, end_dim);
}

// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_named_out_dim::schema> create_flatten_named_out_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_named_out_dim::name, flatten_named_out_dim::overload_name)
      .typed<flatten_named_out_dim::schema>();
}

// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_named_out_dim::call(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
    
    static auto op = create_flatten_named_out_dim_typed_handle();
    return op.call(self, start_dim, end_dim, out_dim);
}

// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_named_out_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
    
    static auto op = create_flatten_named_out_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim);
}

// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_using_names::schema> create_flatten_using_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_using_names::name, flatten_using_names::overload_name)
      .typed<flatten_using_names::schema>();
}

// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_using_names::call(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
    
    static auto op = create_flatten_using_names_typed_handle();
    return op.call(self, start_dim, end_dim, out_dim);
}

// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_using_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
    
    static auto op = create_flatten_using_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim);
}

// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<flatten_DimnameList::schema> create_flatten_DimnameList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_DimnameList::name, flatten_DimnameList::overload_name)
      .typed<flatten_DimnameList::schema>();
}

// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_DimnameList::call(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
    
    static auto op = create_flatten_DimnameList_typed_handle();
    return op.call(self, dims, out_dim);
}

// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
at::Tensor flatten_DimnameList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
    
    static auto op = create_flatten_DimnameList_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims, out_dim);
}

// --- Dispatcher glue for aten::unflatten (.int and .Dimname overloads) ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<unflatten_int::schema> create_unflatten_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unflatten_int::name, unflatten_int::overload_name)
      .typed<unflatten_int::schema>();
}

// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)
at::Tensor unflatten_int::call(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) {
    
    static auto op = create_unflatten_int_typed_handle();
    return op.call(self, dim, sizes);
}

// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)
at::Tensor unflatten_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) {
    
    static auto op = create_unflatten_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, sizes);
}

// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<unflatten_Dimname::schema> create_unflatten_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unflatten_Dimname::name, unflatten_Dimname::overload_name)
      .typed<unflatten_Dimname::schema>();
}

// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)
at::Tensor unflatten_Dimname::call(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) {
    
    static auto op = create_unflatten_Dimname_typed_handle();
    return op.call(self, dim, sizes, names);
}

// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)
at::Tensor unflatten_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) {
    
    static auto op = create_unflatten_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, sizes, names);
}

// --- Dispatcher glue for aten::fill (Scalar/Tensor) and in-place fill_ ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fill_Scalar::schema> create_fill_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_Scalar::name, fill_Scalar::overload_name)
      .typed<fill_Scalar::schema>();
}

// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
at::Tensor fill_Scalar::call(const at::Tensor & self, const at::Scalar & value) {
    
    static auto op = create_fill_Scalar_typed_handle();
    return op.call(self, value);
}

// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
at::Tensor fill_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value) {
    
    static auto op = create_fill_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fill_Tensor::schema> create_fill_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_Tensor::name, fill_Tensor::overload_name)
      .typed<fill_Tensor::schema>();
}

// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
at::Tensor fill_Tensor::call(const at::Tensor & self, const at::Tensor & value) {
    
    static auto op = create_fill_Tensor_typed_handle();
    return op.call(self, value);
}

// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
at::Tensor fill_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value) {
    
    static auto op = create_fill_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fill__Scalar::schema> create_fill__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill__Scalar::name, fill__Scalar::overload_name)
      .typed<fill__Scalar::schema>();
}

// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
at::Tensor & fill__Scalar::call(at::Tensor & self, const at::Scalar & value) {
    
    static auto op = create_fill__Scalar_typed_handle();
    return op.call(self, value);
}

// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
at::Tensor & fill__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value) {
    
    static auto op = create_fill__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fill__Tensor::schema> create_fill__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill__Tensor::name, fill__Tensor::overload_name)
      .typed<fill__Tensor::schema>();
}

// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
at::Tensor & fill__Tensor::call(at::Tensor & self, const at::Tensor & value) {
    
    static auto op = create_fill__Tensor_typed_handle();
    return op.call(self, value);
}

// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
at::Tensor & fill__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) {
    
    static auto op = create_fill__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, value);
}

// --- Dispatcher glue for aten::floor / floor_ / floor.out ---
// Same generated pattern: cold handle factory + static-cached call/redispatch.
// aten::floor(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<floor::schema> create_floor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor::name, floor::overload_name)
      .typed<floor::schema>();
}

// aten::floor(Tensor self) -> Tensor
at::Tensor floor::call(const at::Tensor & self) {
    
    static auto op = create_floor_typed_handle();
    return op.call(self);
}

// aten::floor(Tensor self) -> Tensor
at::Tensor floor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_floor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::floor_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_::schema> create_floor__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_::name, floor_::overload_name)
      .typed<floor_::schema>();
}

// aten::floor_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & floor_::call(at::Tensor & self) {
    
    static auto op = create_floor__typed_handle();
    return op.call(self);
}

// aten::floor_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & floor_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_floor__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_out::schema> create_floor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_out::name, floor_out::overload_name)
      .typed<floor_out::schema>();
}

// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_floor_out_typed_handle();
    return op.call(self, out);
}

// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_floor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::floor_divide(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide::schema> create_floor_divide_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide::name, floor_divide::overload_name)
      .typed<floor_divide::schema>();
}

// aten::floor_divide(Tensor self, Tensor other) -> Tensor
at::Tensor floor_divide::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_floor_divide_typed_handle();
    return op.call(self, other);
}

// aten::floor_divide(Tensor self, Tensor other) -> Tensor
at::Tensor floor_divide::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_floor_divide_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide__Tensor::schema> create_floor_divide__Tensor_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(floor_divide__Tensor::name, floor_divide__Tensor::overload_name).typed<floor_divide__Tensor::schema>();
}

// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & floor_divide__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_floor_divide__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & floor_divide__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_floor_divide__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide_out::schema> create_floor_divide_out_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(floor_divide_out::name, floor_divide_out::overload_name).typed<floor_divide_out::schema>();
}

// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_divide_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_floor_divide_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_divide_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_floor_divide_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide_Scalar::schema> create_floor_divide_Scalar_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(floor_divide_Scalar::name, floor_divide_Scalar::overload_name).typed<floor_divide_Scalar::schema>();
}

// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor floor_divide_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_floor_divide_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor floor_divide_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_floor_divide_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide__Scalar::schema> create_floor_divide__Scalar_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(floor_divide__Scalar::name, floor_divide__Scalar::overload_name).typed<floor_divide__Scalar::schema>();
}

// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & floor_divide__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_floor_divide__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & floor_divide__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_floor_divide__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::frac(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<frac::schema> create_frac_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frac::name, frac::overload_name).typed<frac::schema>();
}

// aten::frac(Tensor self) -> Tensor
at::Tensor frac::call(const at::Tensor & self) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_frac_typed_handle();
    return handle.call(self);
}

// aten::frac(Tensor self) -> Tensor
at::Tensor frac::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_frac_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::frac_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<frac_::schema> create_frac__typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frac_::name, frac_::overload_name).typed<frac_::schema>();
}

// aten::frac_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & frac_::call(at::Tensor & self) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_frac__typed_handle();
    return handle.call(self);
}

// aten::frac_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & frac_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto handle = create_frac__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<frac_out::schema> create_frac_out_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frac_out::name, frac_out::overload_name).typed<frac_out::schema>();
}

// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & frac_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_frac_out_typed_handle();
    return handle.call(self, out);
}

// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & frac_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_frac_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<full_names::schema> create_full_names_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(full_names::name, full_names::overload_name).typed<full_names::schema>();
}

// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full_names::call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_full_names_typed_handle();
    return handle.call(size, fill_value, names, dtype, layout, device, pin_memory);
}

// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_full_names_typed_handle();
    return handle.redispatch(dispatchKeySet, size, fill_value, names, dtype, layout, device, pin_memory);
}

// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<full::schema> create_full_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(full::name, full::overload_name).typed<full::schema>();
}

// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full::call(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_full_typed_handle();
    return handle.call(size, fill_value, dtype, layout, device, pin_memory);
}

// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor full::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_full_typed_handle();
    return handle.redispatch(dispatchKeySet, size, fill_value, dtype, layout, device, pin_memory);
}

// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<full_out::schema> create_full_out_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(full_out::name, full_out::overload_name).typed<full_out::schema>();
}

// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_out::call(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_full_out_typed_handle();
    return handle.call(size, fill_value, out);
}

// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    static auto handle = create_full_out_typed_handle();
    return handle.redispatch(dispatchKeySet, size, fill_value, out);
}

// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<full_like::schema> create_full_like_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(full_like::name, full_like::overload_name).typed<full_like::schema>();
}

// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor full_like::call(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_full_like_typed_handle();
    return handle.call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
}

// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor full_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    static auto handle = create_full_like_typed_handle();
    return handle.redispatch(dispatchKeySet, self, fill_value, dtype, layout, device, pin_memory, memory_format);
}

// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<from_file::schema> create_from_file_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(from_file::name, from_file::overload_name).typed<from_file::schema>();
}

// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor from_file::call(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_from_file_typed_handle();
    return handle.call(filename, shared, size, dtype, layout, device, pin_memory);
}

// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor from_file::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_from_file_typed_handle();
    return handle.redispatch(dispatchKeySet, filename, shared, size, dtype, layout, device, pin_memory);
}

// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gcd_out::schema> create_gcd_out_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gcd_out::name, gcd_out::overload_name).typed<gcd_out::schema>();
}

// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gcd_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_gcd_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gcd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_gcd_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::gcd(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gcd::schema> create_gcd_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gcd::name, gcd::overload_name).typed<gcd::schema>();
}

// aten::gcd(Tensor self, Tensor other) -> Tensor
at::Tensor gcd::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_gcd_typed_handle();
    return handle.call(self, other);
}

// aten::gcd(Tensor self, Tensor other) -> Tensor
at::Tensor gcd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_gcd_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gcd_::schema> create_gcd__typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gcd_::name, gcd_::overload_name).typed<gcd_::schema>();
}

// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & gcd_::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_gcd__typed_handle();
    return handle.call(self, other);
}

// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & gcd_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_gcd__typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lcm_out::schema> create_lcm_out_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lcm_out::name, lcm_out::overload_name).typed<lcm_out::schema>();
}

// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lcm_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_lcm_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lcm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_lcm_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::lcm(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lcm::schema> create_lcm_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lcm::name, lcm::overload_name).typed<lcm::schema>();
}

// aten::lcm(Tensor self, Tensor other) -> Tensor
at::Tensor lcm::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_lcm_typed_handle();
    return handle.call(self, other);
}

// aten::lcm(Tensor self, Tensor other) -> Tensor
at::Tensor lcm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_lcm_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lcm_::schema> create_lcm__typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lcm_::name, lcm_::overload_name).typed<lcm_::schema>();
}

// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lcm_::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_lcm__typed_handle();
    return handle.call(self, other);
}

// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lcm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_lcm__typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler::schema> create_grid_sampler_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(grid_sampler::name, grid_sampler::overload_name).typed<grid_sampler::schema>();
}

// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_grid_sampler_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    static auto handle = create_grid_sampler_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d::schema> create_grid_sampler_2d_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(grid_sampler_2d::name, grid_sampler_2d::overload_name).typed<grid_sampler_2d::schema>();
}

// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_2d::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_grid_sampler_2d_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    static auto handle = create_grid_sampler_2d_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d_backward::schema> create_grid_sampler_2d_backward_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(grid_sampler_2d_backward::name, grid_sampler_2d_backward::overload_name).typed<grid_sampler_2d_backward::schema>();
}

// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_grid_sampler_2d_backward_typed_handle();
    return handle.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
    static auto handle = create_grid_sampler_2d_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_grid_sampler_2d_cpu_fallback::schema> create__grid_sampler_2d_cpu_fallback_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_grid_sampler_2d_cpu_fallback::name, _grid_sampler_2d_cpu_fallback::overload_name).typed<_grid_sampler_2d_cpu_fallback::schema>();
}

// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor _grid_sampler_2d_cpu_fallback::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create__grid_sampler_2d_cpu_fallback_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor _grid_sampler_2d_cpu_fallback::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    static auto handle = create__grid_sampler_2d_cpu_fallback_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_grid_sampler_2d_cpu_fallback_backward::schema> create__grid_sampler_2d_cpu_fallback_backward_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_grid_sampler_2d_cpu_fallback_backward::name, _grid_sampler_2d_cpu_fallback_backward::overload_name).typed<_grid_sampler_2d_cpu_fallback_backward::schema>();
}

// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create__grid_sampler_2d_cpu_fallback_backward_typed_handle();
    return handle.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    static auto handle = create__grid_sampler_2d_cpu_fallback_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d::schema> create_grid_sampler_3d_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(grid_sampler_3d::name, grid_sampler_3d::overload_name).typed<grid_sampler_3d::schema>();
}

// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_3d::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_grid_sampler_3d_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
at::Tensor grid_sampler_3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
    static auto handle = create_grid_sampler_3d_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
}

// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d_backward::schema> create_grid_sampler_3d_backward_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(grid_sampler_3d_backward::name, grid_sampler_3d_backward::overload_name).typed<grid_sampler_3d_backward::schema>();
}

// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_grid_sampler_3d_backward_typed_handle();
    return handle.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
    static auto handle = create_grid_sampler_3d_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
}

// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hann_window::schema> create_hann_window_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hann_window::name, hann_window::overload_name).typed<hann_window::schema>();
}

// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window::call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_hann_window_typed_handle();
    return handle.call(window_length, dtype, layout, device, pin_memory);
}

// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_hann_window_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hann_window_periodic::schema> create_hann_window_periodic_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hann_window_periodic::name, hann_window_periodic::overload_name).typed<hann_window_periodic::schema>();
}

// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window_periodic::call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_hann_window_periodic_typed_handle();
    return handle.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hann_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_hann_window_periodic_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window::schema> create_hamming_window_typed_handle() {
  // Resolve the operator schema and return a statically-typed handle to it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hamming_window::name, hamming_window::overload_name).typed<hamming_window::schema>();
}

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window::call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is cached across calls via a function-local static.
    static auto handle = create_hamming_window_typed_handle();
    return handle.call(window_length, dtype, layout, device, pin_memory);
}

// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_hamming_window_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic::schema> create_hamming_window_periodic_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hamming_window_periodic::name, hamming_window_periodic::overload_name).typed<hamming_window_periodic::schema>();
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic::call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_hamming_window_periodic_typed_handle();
    return handle.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor hamming_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_hamming_window_periodic_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha::schema> create_hamming_window_periodic_alpha_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hamming_window_periodic_alpha::name, hamming_window_periodic_alpha::overload_name).typed<hamming_window_periodic_alpha::schema>();
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic_alpha::call(int64_t window_length, bool periodic, double alpha, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_hamming_window_periodic_alpha_typed_handle();
    return handle.call(window_length, periodic, alpha, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor hamming_window_periodic_alpha::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_hamming_window_periodic_alpha_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, alpha, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha_beta::schema> create_hamming_window_periodic_alpha_beta_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hamming_window_periodic_alpha_beta::name, hamming_window_periodic_alpha_beta::overload_name).typed<hamming_window_periodic_alpha_beta::schema>();
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor hamming_window_periodic_alpha_beta::call(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_hamming_window_periodic_alpha_beta_typed_handle();
    return handle.call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}

// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor hamming_window_periodic_alpha_beta::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_hamming_window_periodic_alpha_beta_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
}

// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window::schema> create_kaiser_window_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(kaiser_window::name, kaiser_window::overload_name).typed<kaiser_window::schema>();
}

// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window::call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_kaiser_window_typed_handle();
    return handle.call(window_length, dtype, layout, device, pin_memory);
}

// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor kaiser_window::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_kaiser_window_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
}

// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_periodic::schema> create_kaiser_window_periodic_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(kaiser_window_periodic::name, kaiser_window_periodic::overload_name).typed<kaiser_window_periodic::schema>();
}

// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window_periodic::call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_kaiser_window_periodic_typed_handle();
    return handle.call(window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor kaiser_window_periodic::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_kaiser_window_periodic_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
}

// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_beta::schema> create_kaiser_window_beta_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(kaiser_window_beta::name, kaiser_window_beta::overload_name).typed<kaiser_window_beta::schema>();
}

// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor kaiser_window_beta::call(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_kaiser_window_beta_typed_handle();
    return handle.call(window_length, periodic, beta, dtype, layout, device, pin_memory);
}

// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor kaiser_window_beta::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static auto handle = create_kaiser_window_beta_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, beta, dtype, layout, device, pin_memory);
}

// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<hinge_embedding_loss::schema> create_hinge_embedding_loss_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hinge_embedding_loss::name, hinge_embedding_loss::overload_name).typed<hinge_embedding_loss::schema>();
}

// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
at::Tensor hinge_embedding_loss::call(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_hinge_embedding_loss_typed_handle();
    return handle.call(self, target, margin, reduction);
}

// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor hinge_embedding_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
    static auto handle = create_hinge_embedding_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, margin, reduction);
}

// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<group_norm::schema> create_group_norm_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(group_norm::name, group_norm::overload_name).typed<group_norm::schema>();
}

// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
at::Tensor group_norm::call(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_group_norm_typed_handle();
    return handle.call(input, num_groups, weight, bias, eps, cudnn_enabled);
}

// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor group_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
    static auto handle = create_group_norm_typed_handle();
    return handle.redispatch(dispatchKeySet, input, num_groups, weight, bias, eps, cudnn_enabled);
}

// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm::schema> create_native_group_norm_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_group_norm::name, native_group_norm::overload_name).typed<native_group_norm::schema>();
}

// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_native_group_norm_typed_handle();
    return handle.call(input, weight, bias, N, C, HxW, group, eps);
}

// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
    static auto handle = create_native_group_norm_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps);
}

// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm_backward::schema> create_native_group_norm_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_group_norm_backward::name, native_group_norm_backward::overload_name).typed<native_group_norm_backward::schema>();
}

// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_native_group_norm_backward_typed_handle();
    return handle.call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
}

// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
    static auto handle = create_native_group_norm_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
}

// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fft_r2c::schema> create__fft_r2c_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fft_r2c::name, _fft_r2c::overload_name).typed<_fft_r2c::schema>();
}

// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
at::Tensor _fft_r2c::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__fft_r2c_typed_handle();
    return handle.call(self, dim, normalization, onesided);
}

// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _fft_r2c::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
    static auto handle = create__fft_r2c_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, normalization, onesided);
}

// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fft_r2c_out::schema> create__fft_r2c_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fft_r2c_out::name, _fft_r2c_out::overload_name).typed<_fft_r2c_out::schema>();
}

// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_r2c_out::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__fft_r2c_out_typed_handle();
    return handle.call(self, dim, normalization, onesided, out);
}

// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & _fft_r2c_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
    static auto handle = create__fft_r2c_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, normalization, onesided, out);
}

// aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2r::schema> create__fft_c2r_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fft_c2r::name, _fft_c2r::overload_name).typed<_fft_c2r::schema>();
}

// aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor
at::Tensor _fft_c2r::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__fft_c2r_typed_handle();
    return handle.call(self, dim, normalization, last_dim_size);
}

// aten::_fft_c2r(Tensor self, int[] dim, int normalization, SymInt last_dim_size) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _fft_c2r::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
    static auto handle = create__fft_c2r_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, normalization, last_dim_size);
}

// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2r_out::schema> create__fft_c2r_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fft_c2r_out::name, _fft_c2r_out::overload_name).typed<_fft_c2r_out::schema>();
}

// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_c2r_out::call(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__fft_c2r_out_typed_handle();
    return handle.call(self, dim, normalization, last_dim_size, out);
}

// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, SymInt last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & _fft_c2r_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out) {
    static auto handle = create__fft_c2r_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out);
}

// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2c::schema> create__fft_c2c_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fft_c2c::name, _fft_c2c::overload_name).typed<_fft_c2c::schema>();
}

// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
at::Tensor _fft_c2c::call(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__fft_c2c_typed_handle();
    return handle.call(self, dim, normalization, forward);
}

// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _fft_c2c::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
    static auto handle = create__fft_c2c_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, normalization, forward);
}

// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_fft_c2c_out::schema> create__fft_c2c_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fft_c2c_out::name, _fft_c2c_out::overload_name).typed<_fft_c2c_out::schema>();
}

// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fft_c2c_out::call(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__fft_c2c_out_typed_handle();
    return handle.call(self, dim, normalization, forward, out);
}

// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & _fft_c2c_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
    static auto handle = create__fft_c2c_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, normalization, forward, out);
}

// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_compressed_sparse_indices::schema> create__validate_compressed_sparse_indices_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_validate_compressed_sparse_indices::name, _validate_compressed_sparse_indices::overload_name).typed<_validate_compressed_sparse_indices::schema>();
}

// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
void _validate_compressed_sparse_indices::call(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__validate_compressed_sparse_indices_typed_handle();
    return handle.call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
}

// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
void _validate_compressed_sparse_indices::redispatch(c10::DispatchKeySet dispatchKeySet, bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
    static auto handle = create__validate_compressed_sparse_indices_typed_handle();
    return handle.redispatch(dispatchKeySet, is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
}

// aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_get_plan_cache_size::schema> create__cufft_get_plan_cache_size_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cufft_get_plan_cache_size::name, _cufft_get_plan_cache_size::overload_name).typed<_cufft_get_plan_cache_size::schema>();
}

// aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int
int64_t _cufft_get_plan_cache_size::call(at::DeviceIndex device_index) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__cufft_get_plan_cache_size_typed_handle();
    return handle.call(device_index);
}

// aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
int64_t _cufft_get_plan_cache_size::redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) {
    static auto handle = create__cufft_get_plan_cache_size_typed_handle();
    return handle.redispatch(dispatchKeySet, device_index);
}

// aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_get_plan_cache_max_size::schema> create__cufft_get_plan_cache_max_size_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cufft_get_plan_cache_max_size::name, _cufft_get_plan_cache_max_size::overload_name).typed<_cufft_get_plan_cache_max_size::schema>();
}

// aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int
int64_t _cufft_get_plan_cache_max_size::call(at::DeviceIndex device_index) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__cufft_get_plan_cache_max_size_typed_handle();
    return handle.call(device_index);
}

// aten::_cufft_get_plan_cache_max_size(DeviceIndex device_index) -> int
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
int64_t _cufft_get_plan_cache_max_size::redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) {
    static auto handle = create__cufft_get_plan_cache_max_size_typed_handle();
    return handle.redispatch(dispatchKeySet, device_index);
}

// aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_set_plan_cache_max_size::schema> create__cufft_set_plan_cache_max_size_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cufft_set_plan_cache_max_size::name, _cufft_set_plan_cache_max_size::overload_name).typed<_cufft_set_plan_cache_max_size::schema>();
}

// aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()
void _cufft_set_plan_cache_max_size::call(at::DeviceIndex device_index, int64_t max_size) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__cufft_set_plan_cache_max_size_typed_handle();
    return handle.call(device_index, max_size);
}

// aten::_cufft_set_plan_cache_max_size(DeviceIndex device_index, int max_size) -> ()
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
void _cufft_set_plan_cache_max_size::redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index, int64_t max_size) {
    static auto handle = create__cufft_set_plan_cache_max_size_typed_handle();
    return handle.redispatch(dispatchKeySet, device_index, max_size);
}

// aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cufft_clear_plan_cache::schema> create__cufft_clear_plan_cache_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cufft_clear_plan_cache::name, _cufft_clear_plan_cache::overload_name).typed<_cufft_clear_plan_cache::schema>();
}

// aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()
void _cufft_clear_plan_cache::call(at::DeviceIndex device_index) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__cufft_clear_plan_cache_typed_handle();
    return handle.call(device_index);
}

// aten::_cufft_clear_plan_cache(DeviceIndex device_index) -> ()
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
void _cufft_clear_plan_cache::redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index) {
    static auto handle = create__cufft_clear_plan_cache_typed_handle();
    return handle.redispatch(dispatchKeySet, device_index);
}

// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_Tensor::schema> create_index_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(index_Tensor::name, index_Tensor::overload_name).typed<index_Tensor::schema>();
}

// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
at::Tensor index_Tensor::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_index_Tensor_typed_handle();
    return handle.call(self, indices);
}

// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor index_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) {
    static auto handle = create_index_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, indices);
}

// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_Tensor_out::schema> create_index_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(index_Tensor_out::name, index_Tensor_out::overload_name).typed<index_Tensor_out::schema>();
}

// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_Tensor_out::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create_index_Tensor_out_typed_handle();
    return handle.call(self, indices, out);
}

// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & index_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out) {
    static auto handle = create_index_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, indices, out);
}

// aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
// Resolves the registered schema for this op and binds it to a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_index_Tensor::schema> create__unsafe_index_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_unsafe_index_Tensor::name, _unsafe_index_Tensor::overload_name).typed<_unsafe_index_Tensor::schema>();
}

// aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
at::Tensor _unsafe_index_Tensor::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto handle = create__unsafe_index_Tensor_typed_handle();
    return handle.call(self, indices);
}

// aten::_unsafe_index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
// Variant of call() that forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _unsafe_index_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) {
    static auto handle = create__unsafe_index_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, indices);
}

// aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_masked_index::schema> create__unsafe_masked_index_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unsafe_masked_index::name, _unsafe_masked_index::overload_name)
      .typed<_unsafe_masked_index::schema>();
}

// aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor
at::Tensor _unsafe_masked_index::call(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill) {
    
    static auto op = create__unsafe_masked_index_typed_handle();
    return op.call(self, mask, indices, fill);
}

// aten::_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor
at::Tensor _unsafe_masked_index::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill) {
    
    static auto op = create__unsafe_masked_index_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, indices, fill);
}

// aten::_unsafe_masked_index_put_accumulate(Tensor self, Tensor mask, Tensor?[] indices, Tensor values) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_masked_index_put_accumulate::schema> create__unsafe_masked_index_put_accumulate_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unsafe_masked_index_put_accumulate::name, _unsafe_masked_index_put_accumulate::overload_name)
      .typed<_unsafe_masked_index_put_accumulate::schema>();
}

// aten::_unsafe_masked_index_put_accumulate(Tensor self, Tensor mask, Tensor?[] indices, Tensor values) -> Tensor
at::Tensor _unsafe_masked_index_put_accumulate::call(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values) {
    
    static auto op = create__unsafe_masked_index_put_accumulate_typed_handle();
    return op.call(self, mask, indices, values);
}

// aten::_unsafe_masked_index_put_accumulate(Tensor self, Tensor mask, Tensor?[] indices, Tensor values) -> Tensor
at::Tensor _unsafe_masked_index_put_accumulate::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values) {
    
    static auto op = create__unsafe_masked_index_put_accumulate_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, indices, values);
}

// Generated dispatcher stubs for the index_copy family: out variant,
// in-place (`index_copy_`), functional (`index_copy`), and the Dimname
// overloads of each. The C10_NOINLINE factories keep the one-time schema
// lookup out of the (hot) inlined call wrappers.
// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_copy_out::schema> create_index_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy_out::name, index_copy_out::overload_name)
      .typed<index_copy_out::schema>();
}

// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_copy_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
    
    static auto op = create_index_copy_out_typed_handle();
    return op.call(self, dim, index, source, out);
}

// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
    
    static auto op = create_index_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source, out);
}

// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_copy_::schema> create_index_copy__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy_::name, index_copy_::overload_name)
      .typed<index_copy_::schema>();
}

// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy__typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_copy::schema> create_index_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy::name, index_copy::overload_name)
      .typed<index_copy::schema>();
}

// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy_typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_copy__dimname::schema> create_index_copy__dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy__dimname::name, index_copy__dimname::overload_name)
      .typed<index_copy__dimname::schema>();
}

// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy__dimname::call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy__dimname_typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
at::Tensor & index_copy__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy__dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_copy_dimname::schema> create_index_copy_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_copy_dimname::name, index_copy_dimname::overload_name)
      .typed<index_copy_dimname::schema>();
}

// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy_dimname_typed_handle();
    return op.call(self, dim, index, source);
}

// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
at::Tensor index_copy_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
    
    static auto op = create_index_copy_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, source);
}

// Generated dispatcher stubs for the index_put family: in-place
// (`index_put_`), functional (`index_put`), the unchecked variant
// (`_unsafe_index_put`), and the internal implementation entry point
// (`_index_put_impl_`, which additionally takes an `unsafe` flag).
// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_put_::schema> create_index_put__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_put_::name, index_put_::overload_name)
      .typed<index_put_::schema>();
}

// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
at::Tensor & index_put_::call(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
    
    static auto op = create_index_put__typed_handle();
    return op.call(self, indices, values, accumulate);
}

// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
at::Tensor & index_put_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
    
    static auto op = create_index_put__typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate);
}

// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_put::schema> create_index_put_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_put::name, index_put::overload_name)
      .typed<index_put::schema>();
}

// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
at::Tensor index_put::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
    
    static auto op = create_index_put_typed_handle();
    return op.call(self, indices, values, accumulate);
}

// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
at::Tensor index_put::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
    
    static auto op = create_index_put_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate);
}

// aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_index_put::schema> create__unsafe_index_put_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unsafe_index_put::name, _unsafe_index_put::overload_name)
      .typed<_unsafe_index_put::schema>();
}

// aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
at::Tensor _unsafe_index_put::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
    
    static auto op = create__unsafe_index_put_typed_handle();
    return op.call(self, indices, values, accumulate);
}

// aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
at::Tensor _unsafe_index_put::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
    
    static auto op = create__unsafe_index_put_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate);
}

// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_index_put_impl_::schema> create__index_put_impl__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_index_put_impl_::name, _index_put_impl_::overload_name)
      .typed<_index_put_impl_::schema>();
}

// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
at::Tensor & _index_put_impl_::call(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
    
    static auto op = create__index_put_impl__typed_handle();
    return op.call(self, indices, values, accumulate, unsafe);
}

// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
at::Tensor & _index_put_impl_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
    
    static auto op = create__index_put_impl__typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
}

// Generated dispatcher stubs for instance_norm and isclose. Note the schema's
// `float` parameters lower to C++ `double` in these signatures.
// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<instance_norm::schema> create_instance_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(instance_norm::name, instance_norm::overload_name)
      .typed<instance_norm::schema>();
}

// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor instance_norm::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
    
    static auto op = create_instance_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}

// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
at::Tensor instance_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
    
    static auto op = create_instance_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
}

// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isclose::schema> create_isclose_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isclose::name, isclose::overload_name)
      .typed<isclose::schema>();
}

// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
at::Tensor isclose::call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    
    static auto op = create_isclose_typed_handle();
    return op.call(self, other, rtol, atol, equal_nan);
}

// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
at::Tensor isclose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
    
    static auto op = create_isclose_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan);
}

// Generated dispatcher stubs for the six isin overloads:
// {Tensor elements × Tensor tests, Tensor × Scalar, Scalar × Tensor},
// each in an `out` and a functional variant.
// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Tensor_out::schema> create_isin_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Tensor_out::name, isin_Tensor_Tensor_out::overload_name)
      .typed<isin_Tensor_Tensor_out::schema>();
}

// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isin_Tensor_Tensor_out::call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Tensor_out_typed_handle();
    return op.call(elements, test_elements, assume_unique, invert, out);
}

// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isin_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert, out);
}

// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Tensor::schema> create_isin_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Tensor::name, isin_Tensor_Tensor::overload_name)
      .typed<isin_Tensor_Tensor::schema>();
}

// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
at::Tensor isin_Tensor_Tensor::call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Tensor_typed_handle();
    return op.call(elements, test_elements, assume_unique, invert);
}

// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
at::Tensor isin_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert);
}

// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Scalar_out::schema> create_isin_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Scalar_out::name, isin_Tensor_Scalar_out::overload_name)
      .typed<isin_Tensor_Scalar_out::schema>();
}

// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isin_Tensor_Scalar_out::call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Scalar_out_typed_handle();
    return op.call(elements, test_element, assume_unique, invert, out);
}

// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isin_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_element, assume_unique, invert, out);
}

// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isin_Tensor_Scalar::schema> create_isin_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Tensor_Scalar::name, isin_Tensor_Scalar::overload_name)
      .typed<isin_Tensor_Scalar::schema>();
}

// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
at::Tensor isin_Tensor_Scalar::call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Scalar_typed_handle();
    return op.call(elements, test_element, assume_unique, invert);
}

// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
at::Tensor isin_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, elements, test_element, assume_unique, invert);
}

// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isin_Scalar_Tensor_out::schema> create_isin_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Scalar_Tensor_out::name, isin_Scalar_Tensor_out::overload_name)
      .typed<isin_Scalar_Tensor_out::schema>();
}

// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isin_Scalar_Tensor_out::call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Scalar_Tensor_out_typed_handle();
    return op.call(element, test_elements, assume_unique, invert, out);
}

// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isin_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
    
    static auto op = create_isin_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, element, test_elements, assume_unique, invert, out);
}

// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isin_Scalar_Tensor::schema> create_isin_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isin_Scalar_Tensor::name, isin_Scalar_Tensor::overload_name)
      .typed<isin_Scalar_Tensor::schema>();
}

// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
at::Tensor isin_Scalar_Tensor::call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Scalar_Tensor_typed_handle();
    return op.call(element, test_elements, assume_unique, invert);
}

// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
at::Tensor isin_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
    
    static auto op = create_isin_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, element, test_elements, assume_unique, invert);
}

// aten::isnan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isnan::schema> create_isnan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isnan::name, isnan::overload_name)
      .typed<isnan::schema>();
}

// aten::isnan(Tensor self) -> Tensor
at::Tensor isnan::call(const at::Tensor & self) {
    
    static auto op = create_isnan_typed_handle();
    return op.call(self);
}

// aten::isnan(Tensor self) -> Tensor
at::Tensor isnan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_isnan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_distributed(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_distributed::schema> create_is_distributed_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_distributed::name, is_distributed::overload_name)
      .typed<is_distributed::schema>();
}

// aten::is_distributed(Tensor self) -> bool
bool is_distributed::call(const at::Tensor & self) {
    
    static auto op = create_is_distributed_typed_handle();
    return op.call(self);
}

// aten::is_distributed(Tensor self) -> bool
bool is_distributed::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_distributed_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_floating_point(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_floating_point::schema> create_is_floating_point_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_floating_point::name, is_floating_point::overload_name)
      .typed<is_floating_point::schema>();
}

// aten::is_floating_point(Tensor self) -> bool
bool is_floating_point::call(const at::Tensor & self) {
    
    static auto op = create_is_floating_point_typed_handle();
    return op.call(self);
}

// aten::is_floating_point(Tensor self) -> bool
bool is_floating_point::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_floating_point_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_complex(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_complex::schema> create_is_complex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_complex::name, is_complex::overload_name)
      .typed<is_complex::schema>();
}

// aten::is_complex(Tensor self) -> bool
bool is_complex::call(const at::Tensor & self) {
    
    static auto op = create_is_complex_typed_handle();
    return op.call(self);
}

// aten::is_complex(Tensor self) -> bool
bool is_complex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_complex_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_conj(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_conj::schema> create_is_conj_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_conj::name, is_conj::overload_name)
      .typed<is_conj::schema>();
}

// aten::is_conj(Tensor self) -> bool
bool is_conj::call(const at::Tensor & self) {
    
    static auto op = create_is_conj_typed_handle();
    return op.call(self);
}

// aten::is_conj(Tensor self) -> bool
bool is_conj::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_conj_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_is_zerotensor(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_is_zerotensor::schema> create__is_zerotensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_is_zerotensor::name, _is_zerotensor::overload_name)
      .typed<_is_zerotensor::schema>();
}

// aten::_is_zerotensor(Tensor self) -> bool
bool _is_zerotensor::call(const at::Tensor & self) {
    
    static auto op = create__is_zerotensor_typed_handle();
    return op.call(self);
}

// aten::_is_zerotensor(Tensor self) -> bool
bool _is_zerotensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__is_zerotensor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_neg(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_neg::schema> create_is_neg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_neg::name, is_neg::overload_name)
      .typed<is_neg::schema>();
}

// aten::is_neg(Tensor self) -> bool
bool is_neg::call(const at::Tensor & self) {
    
    static auto op = create_is_neg_typed_handle();
    return op.call(self);
}

// aten::is_neg(Tensor self) -> bool
bool is_neg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_neg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::isreal(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isreal::schema> create_isreal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isreal::name, isreal::overload_name)
      .typed<isreal::schema>();
}

// aten::isreal(Tensor self) -> Tensor
at::Tensor isreal::call(const at::Tensor & self) {
    
    static auto op = create_isreal_typed_handle();
    return op.call(self);
}

// aten::isreal(Tensor self) -> Tensor
at::Tensor isreal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_isreal_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_nonzero(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_nonzero::schema> create_is_nonzero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_nonzero::name, is_nonzero::overload_name)
      .typed<is_nonzero::schema>();
}

// aten::is_nonzero(Tensor self) -> bool
bool is_nonzero::call(const at::Tensor & self) {
    
    static auto op = create_is_nonzero_typed_handle();
    return op.call(self);
}

// aten::is_nonzero(Tensor self) -> bool
bool is_nonzero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_nonzero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_same_size(Tensor self, Tensor other) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_same_size::schema> create_is_same_size_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(is_same_size::name, is_same_size::overload_name).typed<is_same_size::schema>();
}

// aten::is_same_size(Tensor self, Tensor other) -> bool
bool is_same_size::call(const at::Tensor & self, const at::Tensor & other) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_is_same_size_typed_handle();
  return handle.call(self, other);
}

// aten::is_same_size(Tensor self, Tensor other) -> bool
bool is_same_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_is_same_size_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::is_signed(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_signed::schema> create_is_signed_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(is_signed::name, is_signed::overload_name).typed<is_signed::schema>();
}

// aten::is_signed(Tensor self) -> bool
bool is_signed::call(const at::Tensor & self) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_is_signed_typed_handle();
  return handle.call(self);
}

// aten::is_signed(Tensor self) -> bool
bool is_signed::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_is_signed_typed_handle();
  return handle.redispatch(dispatchKeySet, self);
}

// aten::is_inference(Tensor self) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_inference::schema> create_is_inference_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(is_inference::name, is_inference::overload_name).typed<is_inference::schema>();
}

// aten::is_inference(Tensor self) -> bool
bool is_inference::call(const at::Tensor & self) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_is_inference_typed_handle();
  return handle.call(self);
}

// aten::is_inference(Tensor self) -> bool
bool is_inference::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_is_inference_typed_handle();
  return handle.redispatch(dispatchKeySet, self);
}

// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kl_div::schema> create_kl_div_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kl_div::name, kl_div::overload_name).typed<kl_div::schema>();
}

// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
at::Tensor kl_div::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kl_div_typed_handle();
  return handle.call(self, target, reduction, log_target);
}

// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
at::Tensor kl_div::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kl_div_typed_handle();
  return handle.redispatch(dispatchKeySet, self, target, reduction, log_target);
}

// aten::kron(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<kron::schema> create_kron_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kron::name, kron::overload_name).typed<kron::schema>();
}

// aten::kron(Tensor self, Tensor other) -> Tensor
at::Tensor kron::call(const at::Tensor & self, const at::Tensor & other) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kron_typed_handle();
  return handle.call(self, other);
}

// aten::kron(Tensor self, Tensor other) -> Tensor
at::Tensor kron::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kron_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<kron_out::schema> create_kron_out_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kron_out::name, kron_out::overload_name).typed<kron_out::schema>();
}

// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kron_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kron_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kron_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kron_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue::schema> create_kthvalue_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kthvalue::name, kthvalue::overload_name).typed<kthvalue::schema>();
}

// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue::call(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kthvalue_typed_handle();
  return handle.call(self, k, dim, keepdim);
}

// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kthvalue_typed_handle();
  return handle.redispatch(dispatchKeySet, self, k, dim, keepdim);
}

// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue_values::schema> create_kthvalue_values_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kthvalue_values::name, kthvalue_values::overload_name).typed<kthvalue_values::schema>();
}

// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_values::call(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kthvalue_values_typed_handle();
  return handle.call(self, k, dim, keepdim, values, indices);
}

// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kthvalue_values_typed_handle();
  return handle.redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
}

// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue_dimname::schema> create_kthvalue_dimname_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kthvalue_dimname::name, kthvalue_dimname::overload_name).typed<kthvalue_dimname::schema>();
}

// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname::call(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kthvalue_dimname_typed_handle();
  return handle.call(self, k, dim, keepdim);
}

// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kthvalue_dimname_typed_handle();
  return handle.redispatch(dispatchKeySet, self, k, dim, keepdim);
}

// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
static C10_NOINLINE c10::TypedOperatorHandle<kthvalue_dimname_out::schema> create_kthvalue_dimname_out_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(kthvalue_dimname_out::name, kthvalue_dimname_out::overload_name).typed<kthvalue_dimname_out::schema>();
}

// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_dimname_out::call(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_kthvalue_dimname_out_typed_handle();
  return handle.call(self, k, dim, keepdim, values, indices);
}

// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> kthvalue_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_kthvalue_dimname_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
}

// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<layer_norm::schema> create_layer_norm_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(layer_norm::name, layer_norm::overload_name).typed<layer_norm::schema>();
}

// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
at::Tensor layer_norm::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_layer_norm_typed_handle();
  return handle.call(input, normalized_shape, weight, bias, eps, cudnn_enable);
}

// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
at::Tensor layer_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_layer_norm_typed_handle();
  return handle.redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, cudnn_enable);
}

// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm::schema> create_native_layer_norm_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(native_layer_norm::name, native_layer_norm::overload_name).typed<native_layer_norm::schema>();
}

// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_native_layer_norm_typed_handle();
  return handle.call(input, normalized_shape, weight, bias, eps);
}

// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_native_layer_norm_typed_handle();
  return handle.redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps);
}

// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm_backward::schema> create_native_layer_norm_backward_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(native_layer_norm_backward::name, native_layer_norm_backward::overload_name).typed<native_layer_norm_backward::schema>();
}

// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_native_layer_norm_backward_typed_handle();
  return handle.call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
}

// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_native_layer_norm_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
}

// aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rms_norm::schema> create_rms_norm_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(rms_norm::name, rms_norm::overload_name).typed<rms_norm::schema>();
}

// aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
at::Tensor rms_norm::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, ::std::optional<double> eps) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_rms_norm_typed_handle();
  return handle.call(input, normalized_shape, weight, eps);
}

// aten::rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
at::Tensor rms_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, ::std::optional<double> eps) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_rms_norm_typed_handle();
  return handle.redispatch(dispatchKeySet, input, normalized_shape, weight, eps);
}

// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nan_to_num::schema> create_nan_to_num_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(nan_to_num::name, nan_to_num::overload_name).typed<nan_to_num::schema>();
}

// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
at::Tensor nan_to_num::call(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_nan_to_num_typed_handle();
  return handle.call(self, nan, posinf, neginf);
}

// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
at::Tensor nan_to_num::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_nan_to_num_typed_handle();
  return handle.redispatch(dispatchKeySet, self, nan, posinf, neginf);
}

// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nan_to_num_::schema> create_nan_to_num__typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(nan_to_num_::name, nan_to_num_::overload_name).typed<nan_to_num_::schema>();
}

// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
at::Tensor & nan_to_num_::call(at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_nan_to_num__typed_handle();
  return handle.call(self, nan, posinf, neginf);
}

// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
at::Tensor & nan_to_num_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_nan_to_num__typed_handle();
  return handle.redispatch(dispatchKeySet, self, nan, posinf, neginf);
}

// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nan_to_num_out::schema> create_nan_to_num_out_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(nan_to_num_out::name, nan_to_num_out::overload_name).typed<nan_to_num_out::schema>();
}

// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nan_to_num_out::call(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_nan_to_num_out_typed_handle();
  return handle.call(self, nan, posinf, neginf, out);
}

// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nan_to_num_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_nan_to_num_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, nan, posinf, neginf, out);
}

// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linear::schema> create_linear_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(linear::name, linear::overload_name).typed<linear::schema>();
}

// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor linear::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_linear_typed_handle();
  return handle.call(input, weight, bias);
}

// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor linear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_linear_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias);
}

// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<linear_backward::schema> create_linear_backward_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(linear_backward::name, linear_backward::overload_name).typed<linear_backward::schema>();
}

// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_linear_backward_typed_handle();
  return handle.call(self, grad_output, weight, output_mask);
}

// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_linear_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, self, grad_output, weight, output_mask);
}

// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linear_out::schema> create_linear_out_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(linear_out::name, linear_out::overload_name).typed<linear_out::schema>();
}

// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linear_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_linear_out_typed_handle();
  return handle.call(input, weight, bias, out);
}

// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_linear_out_typed_handle();
  return handle.redispatch(dispatchKeySet, input, weight, bias, out);
}

// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear::schema> create_mkldnn_linear_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(mkldnn_linear::name, mkldnn_linear::overload_name).typed<mkldnn_linear::schema>();
}

// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor mkldnn_linear::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_mkldnn_linear_typed_handle();
  return handle.call(self, weight, bias);
}

// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
at::Tensor mkldnn_linear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_mkldnn_linear_typed_handle();
  return handle.redispatch(dispatchKeySet, self, weight, bias);
}

// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_input::schema> create_mkldnn_linear_backward_input_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(mkldnn_linear_backward_input::name, mkldnn_linear_backward_input::overload_name).typed<mkldnn_linear_backward_input::schema>();
}

// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
at::Tensor mkldnn_linear_backward_input::call(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_mkldnn_linear_backward_input_typed_handle();
  return handle.call(input_size, grad_output, weight);
}

// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
at::Tensor mkldnn_linear_backward_input::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_mkldnn_linear_backward_input_typed_handle();
  return handle.redispatch(dispatchKeySet, input_size, grad_output, weight);
}

// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_weights::schema> create_mkldnn_linear_backward_weights_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(mkldnn_linear_backward_weights::name, mkldnn_linear_backward_weights::overload_name).typed<mkldnn_linear_backward_weights::schema>();
}

// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_mkldnn_linear_backward_weights_typed_handle();
  return handle.call(grad_output, input, weight, bias_defined);
}

// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_mkldnn_linear_backward_weights_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_output, input, weight, bias_defined);
}

// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward::schema> create_mkldnn_linear_backward_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(mkldnn_linear_backward::name, mkldnn_linear_backward::overload_name).typed<mkldnn_linear_backward::schema>();
}

// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create_mkldnn_linear_backward_typed_handle();
  return handle.call(self, grad_output, weight, output_mask);
}

// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create_mkldnn_linear_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, self, grad_output, weight, output_mask);
}

// aten::_cslt_compress(Tensor input) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cslt_compress::schema> create__cslt_compress_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(_cslt_compress::name, _cslt_compress::overload_name).typed<_cslt_compress::schema>();
}

// aten::_cslt_compress(Tensor input) -> Tensor
at::Tensor _cslt_compress::call(const at::Tensor & input) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create__cslt_compress_typed_handle();
  return handle.call(input);
}

// aten::_cslt_compress(Tensor input) -> Tensor
at::Tensor _cslt_compress::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create__cslt_compress_typed_handle();
  return handle.redispatch(dispatchKeySet, input);
}

// aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0, int split_k=1, bool split_k_one_kernel=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cslt_sparse_mm::schema> create__cslt_sparse_mm_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(_cslt_sparse_mm::name, _cslt_sparse_mm::overload_name).typed<_cslt_sparse_mm::schema>();
}

// aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0, int split_k=1, bool split_k_one_kernel=True) -> Tensor
at::Tensor _cslt_sparse_mm::call(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & alpha, ::std::optional<at::ScalarType> out_dtype, bool transpose_result, int64_t alg_id, int64_t split_k, bool split_k_one_kernel) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create__cslt_sparse_mm_typed_handle();
  return handle.call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id, split_k, split_k_one_kernel);
}

// aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0, int split_k=1, bool split_k_one_kernel=True) -> Tensor
at::Tensor _cslt_sparse_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & alpha, ::std::optional<at::ScalarType> out_dtype, bool transpose_result, int64_t alg_id, int64_t split_k, bool split_k_one_kernel) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create__cslt_sparse_mm_typed_handle();
  return handle.redispatch(dispatchKeySet, compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id, split_k, split_k_one_kernel);
}

// aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_cslt_sparse_mm_search::schema> create__cslt_sparse_mm_search_typed_handle() {
  // Look the operator up once in the global dispatcher table and bind it to
  // its statically known schema type.
  return c10::Dispatcher::singleton().findSchemaOrThrow(_cslt_sparse_mm_search::name, _cslt_sparse_mm_search::overload_name).typed<_cslt_sparse_mm_search::schema>();
}

// aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int
int64_t _cslt_sparse_mm_search::call(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & alpha, ::std::optional<at::ScalarType> out_dtype, bool transpose_result) {
  // The typed handle is resolved lazily on first use and cached thereafter.
  static auto handle = create__cslt_sparse_mm_search_typed_handle();
  return handle.call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result);
}

// aten::_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int
int64_t _cslt_sparse_mm_search::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & alpha, ::std::optional<at::ScalarType> out_dtype, bool transpose_result) {
  // Re-enter dispatch with a caller-supplied dispatch key set.
  static auto handle = create__cslt_sparse_mm_search_typed_handle();
  return handle.redispatch(dispatchKeySet, compressed_A, dense_B, bias, alpha, out_dtype, transpose_result);
}

// aten::_sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_semi_structured_tile::schema> create__sparse_semi_structured_tile_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_semi_structured_tile::name, _sparse_semi_structured_tile::overload_name).typed<_sparse_semi_structured_tile::schema>();
}

// aten::_sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _sparse_semi_structured_tile::call(const at::Tensor & input, c10::string_view algorithm, bool use_cutlass) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__sparse_semi_structured_tile_typed_handle();
    return handle.call(input, algorithm, use_cutlass);
}

// aten::_sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _sparse_semi_structured_tile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::string_view algorithm, bool use_cutlass) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__sparse_semi_structured_tile_typed_handle();
    return handle.redispatch(dispatchKeySet, input, algorithm, use_cutlass);
}

// aten::_sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_semi_structured_apply::schema> create__sparse_semi_structured_apply_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_semi_structured_apply::name, _sparse_semi_structured_apply::overload_name).typed<_sparse_semi_structured_apply::schema>();
}

// aten::_sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply::call(const at::Tensor & input, const at::Tensor & thread_masks) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__sparse_semi_structured_apply_typed_handle();
    return handle.call(input, thread_masks);
}

// aten::_sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & thread_masks) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__sparse_semi_structured_apply_typed_handle();
    return handle.redispatch(dispatchKeySet, input, thread_masks);
}

// aten::_sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_semi_structured_apply_dense::schema> create__sparse_semi_structured_apply_dense_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_semi_structured_apply_dense::name, _sparse_semi_structured_apply_dense::overload_name).typed<_sparse_semi_structured_apply_dense::schema>();
}

// aten::_sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor
at::Tensor _sparse_semi_structured_apply_dense::call(const at::Tensor & input, const at::Tensor & thread_masks) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__sparse_semi_structured_apply_dense_typed_handle();
    return handle.call(input, thread_masks);
}

// aten::_sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor
at::Tensor _sparse_semi_structured_apply_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & thread_masks) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__sparse_semi_structured_apply_dense_typed_handle();
    return handle.redispatch(dispatchKeySet, input, thread_masks);
}

// aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_semi_structured_linear::schema> create__sparse_semi_structured_linear_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_semi_structured_linear::name, _sparse_semi_structured_linear::overload_name).typed<_sparse_semi_structured_linear::schema>();
}

// aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
at::Tensor _sparse_semi_structured_linear::call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation, ::std::optional<at::ScalarType> out_dtype) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__sparse_semi_structured_linear_typed_handle();
    return handle.call(input, weight, meta, bias, activation, out_dtype);
}

// aten::_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
at::Tensor _sparse_semi_structured_linear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation, ::std::optional<at::ScalarType> out_dtype) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__sparse_semi_structured_linear_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, meta, bias, activation, out_dtype);
}

// aten::_sparse_semi_structured_mm(Tensor mat1, Tensor mat1_meta, Tensor mat2, *, ScalarType? out_dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_semi_structured_mm::schema> create__sparse_semi_structured_mm_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_semi_structured_mm::name, _sparse_semi_structured_mm::overload_name).typed<_sparse_semi_structured_mm::schema>();
}

// aten::_sparse_semi_structured_mm(Tensor mat1, Tensor mat1_meta, Tensor mat2, *, ScalarType? out_dtype=None) -> Tensor
at::Tensor _sparse_semi_structured_mm::call(const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, ::std::optional<at::ScalarType> out_dtype) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__sparse_semi_structured_mm_typed_handle();
    return handle.call(mat1, mat1_meta, mat2, out_dtype);
}

// aten::_sparse_semi_structured_mm(Tensor mat1, Tensor mat1_meta, Tensor mat2, *, ScalarType? out_dtype=None) -> Tensor
at::Tensor _sparse_semi_structured_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, ::std::optional<at::ScalarType> out_dtype) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__sparse_semi_structured_mm_typed_handle();
    return handle.redispatch(dispatchKeySet, mat1, mat1_meta, mat2, out_dtype);
}

// aten::_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? out_dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_semi_structured_addmm::schema> create__sparse_semi_structured_addmm_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_semi_structured_addmm::name, _sparse_semi_structured_addmm::overload_name).typed<_sparse_semi_structured_addmm::schema>();
}

// aten::_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? out_dtype=None) -> Tensor
at::Tensor _sparse_semi_structured_addmm::call(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha, const at::Scalar & beta, ::std::optional<at::ScalarType> out_dtype) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__sparse_semi_structured_addmm_typed_handle();
    return handle.call(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype);
}

// aten::_sparse_semi_structured_addmm(Tensor input, Tensor mat1, Tensor mat1_meta, Tensor mat2, *, Scalar alpha=1, Scalar beta=1, ScalarType? out_dtype=None) -> Tensor
at::Tensor _sparse_semi_structured_addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha, const at::Scalar & beta, ::std::optional<at::ScalarType> out_dtype) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__sparse_semi_structured_addmm_typed_handle();
    return handle.redispatch(dispatchKeySet, input, mat1, mat1_meta, mat2, alpha, beta, out_dtype);
}

// aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_mixed_dtypes_linear::schema> create__mixed_dtypes_linear_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_mixed_dtypes_linear::name, _mixed_dtypes_linear::overload_name).typed<_mixed_dtypes_linear::schema>();
}

// aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor
at::Tensor _mixed_dtypes_linear::call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__mixed_dtypes_linear_typed_handle();
    return handle.call(input, weight, scale, bias, activation);
}

// aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor
at::Tensor _mixed_dtypes_linear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__mixed_dtypes_linear_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, scale, bias, activation);
}

// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_int8_weight_fp32_activation::schema> create_fbgemm_linear_int8_weight_fp32_activation_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_linear_int8_weight_fp32_activation::name, fbgemm_linear_int8_weight_fp32_activation::overload_name).typed<fbgemm_linear_int8_weight_fp32_activation::schema>();
}

// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight_fp32_activation::call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_linear_int8_weight_fp32_activation_typed_handle();
    return handle.call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight_fp32_activation::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_linear_int8_weight_fp32_activation_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_int8_weight::schema> create_fbgemm_linear_int8_weight_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_linear_int8_weight::name, fbgemm_linear_int8_weight::overload_name).typed<fbgemm_linear_int8_weight::schema>();
}

// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight::call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_linear_int8_weight_typed_handle();
    return handle.call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_int8_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_linear_int8_weight_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
}

// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_quantize_weight::schema> create_fbgemm_linear_quantize_weight_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_linear_quantize_weight::name, fbgemm_linear_quantize_weight::overload_name).typed<fbgemm_linear_quantize_weight::schema>();
}

// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight::call(const at::Tensor & input) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_linear_quantize_weight_typed_handle();
    return handle.call(input);
}

// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_linear_quantize_weight_typed_handle();
    return handle.redispatch(dispatchKeySet, input);
}

// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_pack_gemm_matrix_fp16::schema> create_fbgemm_pack_gemm_matrix_fp16_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_pack_gemm_matrix_fp16::name, fbgemm_pack_gemm_matrix_fp16::overload_name).typed<fbgemm_pack_gemm_matrix_fp16::schema>();
}

// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
at::Tensor fbgemm_pack_gemm_matrix_fp16::call(const at::Tensor & input) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_pack_gemm_matrix_fp16_typed_handle();
    return handle.call(input);
}

// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
at::Tensor fbgemm_pack_gemm_matrix_fp16::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_pack_gemm_matrix_fp16_typed_handle();
    return handle.redispatch(dispatchKeySet, input);
}

// aten::_wrapped_linear_prepack(Tensor weight, Tensor weight_scale, Tensor weight_zero_point, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_wrapped_linear_prepack::schema> create__wrapped_linear_prepack_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_wrapped_linear_prepack::name, _wrapped_linear_prepack::overload_name).typed<_wrapped_linear_prepack::schema>();
}

// aten::_wrapped_linear_prepack(Tensor weight, Tensor weight_scale, Tensor weight_zero_point, Tensor bias) -> Tensor
at::Tensor _wrapped_linear_prepack::call(const at::Tensor & weight, const at::Tensor & weight_scale, const at::Tensor & weight_zero_point, const at::Tensor & bias) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__wrapped_linear_prepack_typed_handle();
    return handle.call(weight, weight_scale, weight_zero_point, bias);
}

// aten::_wrapped_linear_prepack(Tensor weight, Tensor weight_scale, Tensor weight_zero_point, Tensor bias) -> Tensor
at::Tensor _wrapped_linear_prepack::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & weight_scale, const at::Tensor & weight_zero_point, const at::Tensor & bias) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__wrapped_linear_prepack_typed_handle();
    return handle.redispatch(dispatchKeySet, weight, weight_scale, weight_zero_point, bias);
}

// aten::_wrapped_quantized_linear_prepacked(Tensor input, Tensor input_scale, Tensor input_zero_point, Tensor packed_weight, Tensor output_scale, Tensor output_zero_point, int out_channel) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_wrapped_quantized_linear_prepacked::schema> create__wrapped_quantized_linear_prepacked_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_wrapped_quantized_linear_prepacked::name, _wrapped_quantized_linear_prepacked::overload_name).typed<_wrapped_quantized_linear_prepacked::schema>();
}

// aten::_wrapped_quantized_linear_prepacked(Tensor input, Tensor input_scale, Tensor input_zero_point, Tensor packed_weight, Tensor output_scale, Tensor output_zero_point, int out_channel) -> Tensor
at::Tensor _wrapped_quantized_linear_prepacked::call(const at::Tensor & input, const at::Tensor & input_scale, const at::Tensor & input_zero_point, const at::Tensor & packed_weight, const at::Tensor & output_scale, const at::Tensor & output_zero_point, int64_t out_channel) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create__wrapped_quantized_linear_prepacked_typed_handle();
    return handle.call(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel);
}

// aten::_wrapped_quantized_linear_prepacked(Tensor input, Tensor input_scale, Tensor input_zero_point, Tensor packed_weight, Tensor output_scale, Tensor output_zero_point, int out_channel) -> Tensor
at::Tensor _wrapped_quantized_linear_prepacked::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & input_scale, const at::Tensor & input_zero_point, const at::Tensor & packed_weight, const at::Tensor & output_scale, const at::Tensor & output_zero_point, int64_t out_channel) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create__wrapped_quantized_linear_prepacked_typed_handle();
    return handle.redispatch(dispatchKeySet, input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel);
}

// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_fp16_weight_fp32_activation::schema> create_fbgemm_linear_fp16_weight_fp32_activation_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_linear_fp16_weight_fp32_activation::name, fbgemm_linear_fp16_weight_fp32_activation::overload_name).typed<fbgemm_linear_fp16_weight_fp32_activation::schema>();
}

// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_fp16_weight_fp32_activation::call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_linear_fp16_weight_fp32_activation_typed_handle();
    return handle.call(input, packed_weight, bias);
}

// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_fp16_weight_fp32_activation::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_linear_fp16_weight_fp32_activation_typed_handle();
    return handle.redispatch(dispatchKeySet, input, packed_weight, bias);
}

// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_linear_fp16_weight::schema> create_fbgemm_linear_fp16_weight_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_linear_fp16_weight::name, fbgemm_linear_fp16_weight::overload_name).typed<fbgemm_linear_fp16_weight::schema>();
}

// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_fp16_weight::call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_linear_fp16_weight_typed_handle();
    return handle.call(input, packed_weight, bias);
}

// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
at::Tensor fbgemm_linear_fp16_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_linear_fp16_weight_typed_handle();
    return handle.redispatch(dispatchKeySet, input, packed_weight, bias);
}

// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_pack_quantized_matrix::schema> create_fbgemm_pack_quantized_matrix_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_pack_quantized_matrix::name, fbgemm_pack_quantized_matrix::overload_name).typed<fbgemm_pack_quantized_matrix::schema>();
}

// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
at::Tensor fbgemm_pack_quantized_matrix::call(const at::Tensor & input) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_pack_quantized_matrix_typed_handle();
    return handle.call(input);
}

// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
at::Tensor fbgemm_pack_quantized_matrix::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_pack_quantized_matrix_typed_handle();
    return handle.redispatch(dispatchKeySet, input);
}

// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fbgemm_pack_quantized_matrix_KN::schema> create_fbgemm_pack_quantized_matrix_KN_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fbgemm_pack_quantized_matrix_KN::name, fbgemm_pack_quantized_matrix_KN::overload_name).typed<fbgemm_pack_quantized_matrix_KN::schema>();
}

// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
at::Tensor fbgemm_pack_quantized_matrix_KN::call(const at::Tensor & input, int64_t K, int64_t N) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_fbgemm_pack_quantized_matrix_KN_typed_handle();
    return handle.call(input, K, N);
}

// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
at::Tensor fbgemm_pack_quantized_matrix_KN::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t K, int64_t N) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_fbgemm_pack_quantized_matrix_KN_typed_handle();
    return handle.redispatch(dispatchKeySet, input, K, N);
}

// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ldexp_Tensor::schema> create_ldexp_Tensor_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ldexp_Tensor::name, ldexp_Tensor::overload_name).typed<ldexp_Tensor::schema>();
}

// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ldexp_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_ldexp_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ldexp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_ldexp_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ldexp_::schema> create_ldexp__typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ldexp_::name, ldexp_::overload_name).typed<ldexp_::schema>();
}

// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ldexp_::call(at::Tensor & self, const at::Tensor & other) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_ldexp__typed_handle();
    return handle.call(self, other);
}

// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ldexp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_ldexp__typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ldexp_out::schema> create_ldexp_out_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ldexp_out::name, ldexp_out::overload_name).typed<ldexp_out::schema>();
}

// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ldexp_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_ldexp_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ldexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_ldexp_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linspace::schema> create_linspace_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linspace::name, linspace::overload_name).typed<linspace::schema>();
}

// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_linspace_typed_handle();
    return handle.call(start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_linspace_typed_handle();
    return handle.redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linspace_Tensor_Tensor::schema> create_linspace_Tensor_Tensor_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linspace_Tensor_Tensor::name, linspace_Tensor_Tensor::overload_name).typed<linspace_Tensor_Tensor::schema>();
}

// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace_Tensor_Tensor::call(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_linspace_Tensor_Tensor_typed_handle();
    return handle.call(start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_linspace_Tensor_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linspace_Tensor_Scalar::schema> create_linspace_Tensor_Scalar_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linspace_Tensor_Scalar::name, linspace_Tensor_Scalar::overload_name).typed<linspace_Tensor_Scalar::schema>();
}

// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace_Tensor_Scalar::call(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_linspace_Tensor_Scalar_typed_handle();
    return handle.call(start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_linspace_Tensor_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linspace_Scalar_Tensor::schema> create_linspace_Scalar_Tensor_typed_handle() {
  // Schema lookup throws if the operator was never registered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linspace_Scalar_Tensor::name, linspace_Scalar_Tensor::overload_name).typed<linspace_Scalar_Tensor::schema>();
}

// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace_Scalar_Tensor::call(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Resolve the typed operator handle once and reuse it across invocations.
    static auto handle = create_linspace_Scalar_Tensor_typed_handle();
    return handle.call(start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor linspace_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Forward through the cached handle, restricted to the provided key set.
    static auto handle = create_linspace_Scalar_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory);
}

// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linspace_out::schema> create_linspace_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linspace_out::name, linspace_out::overload_name)
      .typed<linspace_out::schema>();
}

// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_out::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_out_typed_handle();
    return op.call(start, end, steps, out);
}

// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, out);
}

// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linspace_Tensor_Tensor_out::schema> create_linspace_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linspace_Tensor_Tensor_out::name, linspace_Tensor_Tensor_out::overload_name)
      .typed<linspace_Tensor_Tensor_out::schema>();
}

// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_Tensor_Tensor_out::call(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_Tensor_Tensor_out_typed_handle();
    return op.call(start, end, steps, out);
}

// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, out);
}

// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linspace_Tensor_Scalar_out::schema> create_linspace_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linspace_Tensor_Scalar_out::name, linspace_Tensor_Scalar_out::overload_name)
      .typed<linspace_Tensor_Scalar_out::schema>();
}

// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_Tensor_Scalar_out::call(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_Tensor_Scalar_out_typed_handle();
    return op.call(start, end, steps, out);
}

// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, out);
}

// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linspace_Scalar_Tensor_out::schema> create_linspace_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linspace_Scalar_Tensor_out::name, linspace_Scalar_Tensor_out::overload_name)
      .typed<linspace_Scalar_Tensor_out::schema>();
}

// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_Scalar_Tensor_out::call(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_Scalar_Tensor_out_typed_handle();
    return op.call(start, end, steps, out);
}

// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linspace_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
    
    static auto op = create_linspace_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, out);
}

// ---------------------------------------------------------------------------
// log / log_ / log.out -- standard torchgen dispatcher glue (@generated file,
// see header; do not edit by hand). Per operator: a C10_NOINLINE handle
// factory performing the Dispatcher schema lookup, a call() entry point that
// caches the typed handle in a function-local static (thread-safe one-time
// init), and a redispatch() that forwards an explicit DispatchKeySet.
// ---------------------------------------------------------------------------
// aten::log(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log::schema> create_log_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log::name, log::overload_name)
      .typed<log::schema>();
}

// aten::log(Tensor self) -> Tensor
at::Tensor log::call(const at::Tensor & self) {
    
    static auto op = create_log_typed_handle();
    return op.call(self);
}

// aten::log(Tensor self) -> Tensor
at::Tensor log::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_::schema> create_log__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_::name, log_::overload_name)
      .typed<log_::schema>();
}

// aten::log_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log_::call(at::Tensor & self) {
    
    static auto op = create_log__typed_handle();
    return op.call(self);
}

// aten::log_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_log__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_out::schema> create_log_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_out::name, log_out::overload_name)
      .typed<log_out::schema>();
}

// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_out_typed_handle();
    return op.call(self, out);
}

// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// ---------------------------------------------------------------------------
// log10 / log10_ / log10.out -- standard torchgen dispatcher glue (@generated
// file, see header; do not edit by hand). Per operator: a C10_NOINLINE handle
// factory performing the Dispatcher schema lookup, a call() entry point that
// caches the typed handle in a function-local static (thread-safe one-time
// init), and a redispatch() that forwards an explicit DispatchKeySet.
// ---------------------------------------------------------------------------
// aten::log10(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log10::schema> create_log10_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log10::name, log10::overload_name)
      .typed<log10::schema>();
}

// aten::log10(Tensor self) -> Tensor
at::Tensor log10::call(const at::Tensor & self) {
    
    static auto op = create_log10_typed_handle();
    return op.call(self);
}

// aten::log10(Tensor self) -> Tensor
at::Tensor log10::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log10_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log10_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log10_::schema> create_log10__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log10_::name, log10_::overload_name)
      .typed<log10_::schema>();
}

// aten::log10_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log10_::call(at::Tensor & self) {
    
    static auto op = create_log10__typed_handle();
    return op.call(self);
}

// aten::log10_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log10_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_log10__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log10_out::schema> create_log10_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log10_out::name, log10_out::overload_name)
      .typed<log10_out::schema>();
}

// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log10_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log10_out_typed_handle();
    return op.call(self, out);
}

// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log10_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log10_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// ---------------------------------------------------------------------------
// log1p / log1p_ / log1p.out -- standard torchgen dispatcher glue (@generated
// file, see header; do not edit by hand). Per operator: a C10_NOINLINE handle
// factory performing the Dispatcher schema lookup, a call() entry point that
// caches the typed handle in a function-local static (thread-safe one-time
// init), and a redispatch() that forwards an explicit DispatchKeySet.
// ---------------------------------------------------------------------------
// aten::log1p(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log1p::schema> create_log1p_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log1p::name, log1p::overload_name)
      .typed<log1p::schema>();
}

// aten::log1p(Tensor self) -> Tensor
at::Tensor log1p::call(const at::Tensor & self) {
    
    static auto op = create_log1p_typed_handle();
    return op.call(self);
}

// aten::log1p(Tensor self) -> Tensor
at::Tensor log1p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log1p_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log1p_::schema> create_log1p__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log1p_::name, log1p_::overload_name)
      .typed<log1p_::schema>();
}

// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log1p_::call(at::Tensor & self) {
    
    static auto op = create_log1p__typed_handle();
    return op.call(self);
}

// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log1p_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_log1p__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log1p_out::schema> create_log1p_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log1p_out::name, log1p_out::overload_name)
      .typed<log1p_out::schema>();
}

// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log1p_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log1p_out_typed_handle();
    return op.call(self, out);
}

// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log1p_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log1p_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// ---------------------------------------------------------------------------
// log2 / log2_ / log2.out -- standard torchgen dispatcher glue (@generated
// file, see header; do not edit by hand). Per operator: a C10_NOINLINE handle
// factory performing the Dispatcher schema lookup, a call() entry point that
// caches the typed handle in a function-local static (thread-safe one-time
// init), and a redispatch() that forwards an explicit DispatchKeySet.
// ---------------------------------------------------------------------------
// aten::log2(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log2::schema> create_log2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log2::name, log2::overload_name)
      .typed<log2::schema>();
}

// aten::log2(Tensor self) -> Tensor
at::Tensor log2::call(const at::Tensor & self) {
    
    static auto op = create_log2_typed_handle();
    return op.call(self);
}

// aten::log2(Tensor self) -> Tensor
at::Tensor log2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log2_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log2_::schema> create_log2__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log2_::name, log2_::overload_name)
      .typed<log2_::schema>();
}

// aten::log2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log2_::call(at::Tensor & self) {
    
    static auto op = create_log2__typed_handle();
    return op.call(self);
}

// aten::log2_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & log2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_log2__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log2_out::schema> create_log2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log2_out::name, log2_out::overload_name)
      .typed<log2_out::schema>();
}

// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log2_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log2_out_typed_handle();
    return op.call(self, out);
}

// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// ---------------------------------------------------------------------------
// logaddexp / logaddexp.out / logaddexp2 / logaddexp2.out -- standard
// torchgen dispatcher glue (@generated file, see header; do not edit by
// hand). Per operator: a C10_NOINLINE handle factory performing the
// Dispatcher schema lookup, a call() entry point that caches the typed
// handle in a function-local static (thread-safe one-time init), and a
// redispatch() that forwards an explicit DispatchKeySet.
// ---------------------------------------------------------------------------
// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp_out::schema> create_logaddexp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp_out::name, logaddexp_out::overload_name)
      .typed<logaddexp_out::schema>();
}

// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logaddexp_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logaddexp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::logaddexp(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp::schema> create_logaddexp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp::name, logaddexp::overload_name)
      .typed<logaddexp::schema>();
}

// aten::logaddexp(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logaddexp_typed_handle();
    return op.call(self, other);
}

// aten::logaddexp(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logaddexp_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp2_out::schema> create_logaddexp2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp2_out::name, logaddexp2_out::overload_name)
      .typed<logaddexp2_out::schema>();
}

// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp2_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logaddexp2_out_typed_handle();
    return op.call(self, other, out);
}

// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logaddexp2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_logaddexp2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logaddexp2::schema> create_logaddexp2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logaddexp2::name, logaddexp2::overload_name)
      .typed<logaddexp2::schema>();
}

// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp2::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logaddexp2_typed_handle();
    return op.call(self, other);
}

// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
at::Tensor logaddexp2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_logaddexp2_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// ---------------------------------------------------------------------------
// xlogy overloads (Tensor, Scalar_Self, Scalar_Other, in-place xlogy_, and
// the OutTensor/OutScalar_Self/OutScalar_Other out-variants) -- standard
// torchgen dispatcher glue (@generated file, see header; do not edit by
// hand). Per operator: a C10_NOINLINE handle factory performing the
// Dispatcher schema lookup, a call() entry point that caches the typed
// handle in a function-local static (thread-safe one-time init), and a
// redispatch() that forwards an explicit DispatchKeySet.
// ---------------------------------------------------------------------------
// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_Tensor::schema> create_xlogy_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_Tensor::name, xlogy_Tensor::overload_name)
      .typed<xlogy_Tensor::schema>();
}

// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor xlogy_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor xlogy_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_Scalar_Self::schema> create_xlogy_Scalar_Self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_Scalar_Self::name, xlogy_Scalar_Self::overload_name)
      .typed<xlogy_Scalar_Self::schema>();
}

// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
at::Tensor xlogy_Scalar_Self::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Scalar_Self_typed_handle();
    return op.call(self, other);
}

// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
at::Tensor xlogy_Scalar_Self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_xlogy_Scalar_Self_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_Scalar_Other::schema> create_xlogy_Scalar_Other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_Scalar_Other::name, xlogy_Scalar_Other::overload_name)
      .typed<xlogy_Scalar_Other::schema>();
}

// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
at::Tensor xlogy_Scalar_Other::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy_Scalar_Other_typed_handle();
    return op.call(self, other);
}

// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
at::Tensor xlogy_Scalar_Other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy_Scalar_Other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy__Tensor::schema> create_xlogy__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy__Tensor::name, xlogy__Tensor::overload_name)
      .typed<xlogy__Tensor::schema>();
}

// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & xlogy__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & xlogy__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_xlogy__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy__Scalar_Other::schema> create_xlogy__Scalar_Other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy__Scalar_Other::name, xlogy__Scalar_Other::overload_name)
      .typed<xlogy__Scalar_Other::schema>();
}

// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & xlogy__Scalar_Other::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy__Scalar_Other_typed_handle();
    return op.call(self, other);
}

// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & xlogy__Scalar_Other::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_xlogy__Scalar_Other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_OutTensor::schema> create_xlogy_OutTensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_OutTensor::name, xlogy_OutTensor::overload_name)
      .typed<xlogy_OutTensor::schema>();
}

// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutTensor::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutTensor_typed_handle();
    return op.call(self, other, out);
}

// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutTensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutTensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_OutScalar_Self::schema> create_xlogy_OutScalar_Self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_OutScalar_Self::name, xlogy_OutScalar_Self::overload_name)
      .typed<xlogy_OutScalar_Self::schema>();
}

// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Self::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Self_typed_handle();
    return op.call(self, other, out);
}

// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Self_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<xlogy_OutScalar_Other::schema> create_xlogy_OutScalar_Other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(xlogy_OutScalar_Other::name, xlogy_OutScalar_Other::overload_name)
      .typed<xlogy_OutScalar_Other::schema>();
}

// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Other::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Other_typed_handle();
    return op.call(self, other, out);
}

// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & xlogy_OutScalar_Other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_xlogy_OutScalar_Other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logspace::schema> create_logspace_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace::name, logspace::overload_name)
      .typed<logspace::schema>();
}

// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_logspace_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_logspace_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Tensor::schema> create_logspace_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Tensor_Tensor::name, logspace_Tensor_Tensor::overload_name)
      .typed<logspace_Tensor_Tensor::schema>();
}

// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Tensor::call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_logspace_Tensor_Tensor_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_logspace_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Scalar::schema> create_logspace_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logspace_Tensor_Scalar::name, logspace_Tensor_Scalar::overload_name)
      .typed<logspace_Tensor_Scalar::schema>();
}

// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Scalar::call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_logspace_Tensor_Scalar_typed_handle();
    return op.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_logspace_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Scalar_Tensor::schema> create_logspace_Scalar_Tensor_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logspace_Scalar_Tensor::name, logspace_Scalar_Tensor::overload_name)
      .typed<logspace_Scalar_Tensor::schema>();
}

// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Scalar_Tensor::call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logspace_Scalar_Tensor_typed_handle();
    return dispatch_handle.call(start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor logspace_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logspace_Scalar_Tensor_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
}

// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logspace_out::schema> create_logspace_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logspace_out::name, logspace_out::overload_name)
      .typed<logspace_out::schema>();
}

// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_out::call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logspace_out_typed_handle();
    return dispatch_handle.call(start, end, steps, base, out);
}

// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logspace_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Tensor_out::schema> create_logspace_Tensor_Tensor_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logspace_Tensor_Tensor_out::name, logspace_Tensor_Tensor_out::overload_name)
      .typed<logspace_Tensor_Tensor_out::schema>();
}

// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Tensor_out::call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logspace_Tensor_Tensor_out_typed_handle();
    return dispatch_handle.call(start, end, steps, base, out);
}

// aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logspace_Tensor_Tensor_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Tensor_Scalar_out::schema> create_logspace_Tensor_Scalar_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logspace_Tensor_Scalar_out::name, logspace_Tensor_Scalar_out::overload_name)
      .typed<logspace_Tensor_Scalar_out::schema>();
}

// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Scalar_out::call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logspace_Tensor_Scalar_out_typed_handle();
    return dispatch_handle.call(start, end, steps, base, out);
}

// aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logspace_Tensor_Scalar_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logspace_Scalar_Tensor_out::schema> create_logspace_Scalar_Tensor_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logspace_Scalar_Tensor_out::name, logspace_Scalar_Tensor_out::overload_name)
      .typed<logspace_Scalar_Tensor_out::schema>();
}

// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Scalar_Tensor_out::call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logspace_Scalar_Tensor_out_typed_handle();
    return dispatch_handle.call(start, end, steps, base, out);
}

// aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logspace_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logspace_Scalar_Tensor_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, start, end, steps, base, out);
}

// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log_softmax_int::schema> create_log_softmax_int_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(log_softmax_int::name, log_softmax_int::overload_name)
      .typed<log_softmax_int::schema>();
}

// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_int::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_log_softmax_int_typed_handle();
    return dispatch_handle.call(self, dim, dtype);
}

// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_log_softmax_int_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<log_softmax_int_out::schema> create_log_softmax_int_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(log_softmax_int_out::name, log_softmax_int_out::overload_name)
      .typed<log_softmax_int_out::schema>();
}

// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_softmax_int_out::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_log_softmax_int_out_typed_handle();
    return dispatch_handle.call(self, dim, dtype, out);
}

// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_softmax_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_log_softmax_int_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<log_softmax_Dimname::schema> create_log_softmax_Dimname_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(log_softmax_Dimname::name, log_softmax_Dimname::overload_name)
      .typed<log_softmax_Dimname::schema>();
}

// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_log_softmax_Dimname_typed_handle();
    return dispatch_handle.call(self, dim, dtype);
}

// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor log_softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_log_softmax_Dimname_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax::schema> create__log_softmax_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_log_softmax::name, _log_softmax::overload_name)
      .typed<_log_softmax::schema>();
}

// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _log_softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__log_softmax_typed_handle();
    return dispatch_handle.call(self, dim, half_to_float);
}

// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _log_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__log_softmax_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, half_to_float);
}

// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax_out::schema> create__log_softmax_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_log_softmax_out::name, _log_softmax_out::overload_name)
      .typed<_log_softmax_out::schema>();
}

// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__log_softmax_out_typed_handle();
    return dispatch_handle.call(self, dim, half_to_float, out);
}

// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__log_softmax_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax_backward_data::schema> create__log_softmax_backward_data_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_log_softmax_backward_data::name, _log_softmax_backward_data::overload_name)
      .typed<_log_softmax_backward_data::schema>();
}

// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _log_softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__log_softmax_backward_data_typed_handle();
    return dispatch_handle.call(grad_output, output, dim, input_dtype);
}

// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _log_softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__log_softmax_backward_data_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype);
}

// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_log_softmax_backward_data_out::schema> create__log_softmax_backward_data_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_log_softmax_backward_data_out::name, _log_softmax_backward_data_out::overload_name)
      .typed<_log_softmax_backward_data_out::schema>();
}

// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__log_softmax_backward_data_out_typed_handle();
    return dispatch_handle.call(grad_output, output, dim, input_dtype, out);
}

// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _log_softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__log_softmax_backward_data_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, out);
}

// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_logcumsumexp::schema> create__logcumsumexp_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_logcumsumexp::name, _logcumsumexp::overload_name)
      .typed<_logcumsumexp::schema>();
}

// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor _logcumsumexp::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__logcumsumexp_typed_handle();
    return dispatch_handle.call(self, dim);
}

// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor _logcumsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__logcumsumexp_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_logcumsumexp_out::schema> create__logcumsumexp_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_logcumsumexp_out::name, _logcumsumexp_out::overload_name)
      .typed<_logcumsumexp_out::schema>();
}

// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _logcumsumexp_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__logcumsumexp_out_typed_handle();
    return dispatch_handle.call(self, dim, out);
}

// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _logcumsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__logcumsumexp_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, out);
}

// aten::logcumsumexp(Tensor self, int dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp::schema> create_logcumsumexp_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logcumsumexp::name, logcumsumexp::overload_name)
      .typed<logcumsumexp::schema>();
}

// aten::logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor logcumsumexp::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logcumsumexp_typed_handle();
    return dispatch_handle.call(self, dim);
}

// aten::logcumsumexp(Tensor self, int dim) -> Tensor
at::Tensor logcumsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logcumsumexp_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp_out::schema> create_logcumsumexp_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logcumsumexp_out::name, logcumsumexp_out::overload_name)
      .typed<logcumsumexp_out::schema>();
}

// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logcumsumexp_out_typed_handle();
    return dispatch_handle.call(self, dim, out);
}

// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logcumsumexp_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, out);
}

// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp_dimname::schema> create_logcumsumexp_dimname_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logcumsumexp_dimname::name, logcumsumexp_dimname::overload_name)
      .typed<logcumsumexp_dimname::schema>();
}

// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
at::Tensor logcumsumexp_dimname::call(const at::Tensor & self, at::Dimname dim) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logcumsumexp_dimname_typed_handle();
    return dispatch_handle.call(self, dim);
}

// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
at::Tensor logcumsumexp_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logcumsumexp_dimname_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logcumsumexp_dimname_out::schema> create_logcumsumexp_dimname_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logcumsumexp_dimname_out::name, logcumsumexp_dimname_out::overload_name)
      .typed<logcumsumexp_dimname_out::schema>();
}

// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_dimname_out::call(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logcumsumexp_dimname_out_typed_handle();
    return dispatch_handle.call(self, dim, out);
}

// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logcumsumexp_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logcumsumexp_dimname_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, out);
}

// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp::schema> create_logsumexp_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logsumexp::name, logsumexp::overload_name)
      .typed<logsumexp::schema>();
}

// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logsumexp_typed_handle();
    return dispatch_handle.call(self, dim, keepdim);
}

// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logsumexp_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp_out::schema> create_logsumexp_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logsumexp_out::name, logsumexp_out::overload_name)
      .typed<logsumexp_out::schema>();
}

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logsumexp_out_typed_handle();
    return dispatch_handle.call(self, dim, keepdim, out);
}

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logsumexp_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp_names::schema> create_logsumexp_names_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logsumexp_names::name, logsumexp_names::overload_name)
      .typed<logsumexp_names::schema>();
}

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp_names::call(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logsumexp_names_typed_handle();
    return dispatch_handle.call(self, dim, keepdim);
}

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
at::Tensor logsumexp_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logsumexp_names_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logsumexp_names_out::schema> create_logsumexp_names_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(logsumexp_names_out::name, logsumexp_names_out::overload_name)
      .typed<logsumexp_names_out::schema>();
}

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_names_out::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_logsumexp_names_out_typed_handle();
    return dispatch_handle.call(self, dim, keepdim, out);
}

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logsumexp_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_logsumexp_names_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<margin_ranking_loss::schema> create_margin_ranking_loss_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(margin_ranking_loss::name, margin_ranking_loss::overload_name)
      .typed<margin_ranking_loss::schema>();
}

// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
at::Tensor margin_ranking_loss::call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_margin_ranking_loss_typed_handle();
    return dispatch_handle.call(input1, input2, target, margin, reduction);
}

// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
at::Tensor margin_ranking_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_margin_ranking_loss_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input1, input2, target, margin, reduction);
}

// aten::matmul(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matmul::schema> create_matmul_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matmul::name, matmul::overload_name)
      .typed<matmul::schema>();
}

// aten::matmul(Tensor self, Tensor other) -> Tensor
at::Tensor matmul::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matmul_typed_handle();
    return dispatch_handle.call(self, other);
}

// aten::matmul(Tensor self, Tensor other) -> Tensor
at::Tensor matmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matmul_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, other);
}

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<matmul_backward::schema> create_matmul_backward_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matmul_backward::name, matmul_backward::overload_name)
      .typed<matmul_backward::schema>();
}

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> matmul_backward::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matmul_backward_typed_handle();
    return dispatch_handle.call(grad, self, other, mask);
}

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> matmul_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matmul_backward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad, self, other, mask);
}

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<matmul_out::schema> create_matmul_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matmul_out::name, matmul_out::overload_name)
      .typed<matmul_out::schema>();
}

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matmul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matmul_out_typed_handle();
    return dispatch_handle.call(self, other, out);
}

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matmul_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::matrix_power(Tensor self, int n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matrix_power::schema> create_matrix_power_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matrix_power::name, matrix_power::overload_name)
      .typed<matrix_power::schema>();
}

// aten::matrix_power(Tensor self, int n) -> Tensor
at::Tensor matrix_power::call(const at::Tensor & self, int64_t n) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matrix_power_typed_handle();
    return dispatch_handle.call(self, n);
}

// aten::matrix_power(Tensor self, int n) -> Tensor
at::Tensor matrix_power::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matrix_power_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, n);
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<matrix_power_out::schema> create_matrix_power_out_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matrix_power_out::name, matrix_power_out::overload_name)
      .typed<matrix_power_out::schema>();
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matrix_power_out::call(const at::Tensor & self, int64_t n, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matrix_power_out_typed_handle();
    return dispatch_handle.call(self, n, out);
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & matrix_power_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matrix_power_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, n, out);
}

// aten::matrix_exp(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matrix_exp::schema> create_matrix_exp_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matrix_exp::name, matrix_exp::overload_name)
      .typed<matrix_exp::schema>();
}

// aten::matrix_exp(Tensor self) -> Tensor
at::Tensor matrix_exp::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matrix_exp_typed_handle();
    return dispatch_handle.call(self);
}

// aten::matrix_exp(Tensor self) -> Tensor
at::Tensor matrix_exp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matrix_exp_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self);
}

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<matrix_exp_backward::schema> create_matrix_exp_backward_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(matrix_exp_backward::name, matrix_exp_backward::overload_name)
      .typed<matrix_exp_backward::schema>();
}

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
at::Tensor matrix_exp_backward::call(const at::Tensor & self, const at::Tensor & grad) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create_matrix_exp_backward_typed_handle();
    return dispatch_handle.call(self, grad);
}

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
at::Tensor matrix_exp_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create_matrix_exp_backward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, grad);
}

// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax::schema> create__aminmax_typed_handle() {
  // Resolve the operator schema through the dispatcher singleton and type-check it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_aminmax::name, _aminmax::overload_name)
      .typed<_aminmax::schema>();
}

// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and reused on every call.
    static auto dispatch_handle = create__aminmax_typed_handle();
    return dispatch_handle.call(self);
}

// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter the dispatcher with an explicit key set.
    static auto dispatch_handle = create__aminmax_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self);
}

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
// Resolves the "_aminmax.dim" overload in the global dispatcher; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax_dim::schema> create__aminmax_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax_dim::name, _aminmax_dim::overload_name)
      .typed<_aminmax_dim::schema>();
}

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create__aminmax_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _aminmax_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create__aminmax_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
// Resolves the operator in the global dispatcher and returns a handle typed to
// aminmax::schema; findSchemaOrThrow throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<aminmax::schema> create_aminmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(aminmax::name, aminmax::overload_name)
      .typed<aminmax::schema>();
}

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
::std::tuple<at::Tensor,at::Tensor> aminmax::call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_aminmax_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
::std::tuple<at::Tensor,at::Tensor> aminmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_aminmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
// Resolves the "aminmax.out" overload in the global dispatcher; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<aminmax_out::schema> create_aminmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(aminmax_out::name, aminmax_out::overload_name)
      .typed<aminmax_out::schema>();
}

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
::std::tuple<at::Tensor &,at::Tensor &> aminmax_out::call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_aminmax_out_typed_handle();
    return op.call(self, dim, keepdim, min, max);
}

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
::std::tuple<at::Tensor &,at::Tensor &> aminmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_aminmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, min, max);
}

// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// _compute_linear_combination::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_compute_linear_combination::schema> create__compute_linear_combination_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_compute_linear_combination::name, _compute_linear_combination::overload_name)
      .typed<_compute_linear_combination::schema>();
}

// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
at::Tensor _compute_linear_combination::call(const at::Tensor & input, const at::Tensor & coefficients) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create__compute_linear_combination_typed_handle();
    return op.call(input, coefficients);
}

// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
at::Tensor _compute_linear_combination::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create__compute_linear_combination_typed_handle();
    return op.redispatch(dispatchKeySet, input, coefficients);
}

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the ".out" overload in the global dispatcher; throws if the schema
// is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_compute_linear_combination_out::schema> create__compute_linear_combination_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_compute_linear_combination_out::name, _compute_linear_combination_out::overload_name)
      .typed<_compute_linear_combination_out::schema>();
}

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _compute_linear_combination_out::call(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create__compute_linear_combination_out_typed_handle();
    return op.call(input, coefficients, out);
}

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _compute_linear_combination_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create__compute_linear_combination_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, coefficients, out);
}

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Resolves the "max.dim" overload in the global dispatcher; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_dim::schema> create_max_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_dim::name, max_dim::overload_name)
      .typed<max_dim::schema>();
}

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
// Resolves the "max.dim_max" overload in the global dispatcher; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_dim_max::schema> create_max_dim_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_dim_max::name, max_dim_max::overload_name)
      .typed<max_dim_max::schema>();
}

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_dim_max::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_dim_max_typed_handle();
    return op.call(self, dim, keepdim, max, max_values);
}

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_dim_max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_dim_max_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
}

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Resolves the "max.names_dim" overload in the global dispatcher; throws if
// the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_names_dim::schema> create_max_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_names_dim::name, max_names_dim::overload_name)
      .typed<max_names_dim::schema>();
}

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> max_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
// Resolves the "max.names_dim_max" overload in the global dispatcher; throws
// if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_names_dim_max::schema> create_max_names_dim_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_names_dim_max::name, max_names_dim_max::overload_name)
      .typed<max_names_dim_max::schema>();
}

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_names_dim_max::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_names_dim_max_typed_handle();
    return op.call(self, dim, keepdim, max, max_values);
}

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> max_names_dim_max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_names_dim_max_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
}

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// value_selecting_reduction_backward::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<value_selecting_reduction_backward::schema> create_value_selecting_reduction_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(value_selecting_reduction_backward::name, value_selecting_reduction_backward::overload_name)
      .typed<value_selecting_reduction_backward::schema>();
}

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
at::Tensor value_selecting_reduction_backward::call(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_value_selecting_reduction_backward_typed_handle();
    return op.call(grad, dim, indices, sizes, keepdim);
}

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
at::Tensor value_selecting_reduction_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_value_selecting_reduction_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, dim, indices, sizes, keepdim);
}

// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// amax::schema; findSchemaOrThrow throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<amax::schema> create_amax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(amax::name, amax::overload_name)
      .typed<amax::schema>();
}

// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
at::Tensor amax::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_amax_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
at::Tensor amax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_amax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the "amax.out" overload in the global dispatcher; throws if the
// schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<amax_out::schema> create_amax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(amax_out::name, amax_out::overload_name)
      .typed<amax_out::schema>();
}

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & amax_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_amax_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & amax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_amax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Resolves the operator in the global dispatcher and returns a handle typed to
// max_pool1d_with_indices::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool1d_with_indices::schema> create_max_pool1d_with_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool1d_with_indices::name, max_pool1d_with_indices::overload_name)
      .typed<max_pool1d_with_indices::schema>();
}

// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_pool1d_with_indices_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_pool1d_with_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// max_pool1d::schema; findSchemaOrThrow throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool1d::schema> create_max_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool1d::name, max_pool1d::overload_name)
      .typed<max_pool1d::schema>();
}

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool1d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_pool1d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// max_pool2d::schema; findSchemaOrThrow throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d::schema> create_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d::name, max_pool2d::overload_name)
      .typed<max_pool2d::schema>();
}

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// max_pool2d_backward::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_backward::schema> create_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_backward::name, max_pool2d_backward::overload_name)
      .typed<max_pool2d_backward::schema>();
}

// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_pool2d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// mkldnn_max_pool2d::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d::schema> create_mkldnn_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d::name, mkldnn_max_pool2d::overload_name)
      .typed<mkldnn_max_pool2d::schema>();
}

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_mkldnn_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_mkldnn_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// mkldnn_max_pool2d_backward::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d_backward::schema> create_mkldnn_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d_backward::name, mkldnn_max_pool2d_backward::overload_name)
      .typed<mkldnn_max_pool2d_backward::schema>();
}

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_mkldnn_max_pool2d_backward_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_mkldnn_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// mkldnn_max_pool3d::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d::schema> create_mkldnn_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d::name, mkldnn_max_pool3d::overload_name)
      .typed<mkldnn_max_pool3d::schema>();
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_mkldnn_max_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_mkldnn_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// mkldnn_max_pool3d_backward::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d_backward::schema> create_mkldnn_max_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d_backward::name, mkldnn_max_pool3d_backward::overload_name)
      .typed<mkldnn_max_pool3d_backward::schema>();
}

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_mkldnn_max_pool3d_backward_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor mkldnn_max_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_mkldnn_max_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// quantized_max_pool1d::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool1d::schema> create_quantized_max_pool1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool1d::name, quantized_max_pool1d::overload_name)
      .typed<quantized_max_pool1d::schema>();
}

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool1d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_quantized_max_pool1d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_quantized_max_pool1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// quantized_max_pool2d::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool2d::schema> create_quantized_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool2d::name, quantized_max_pool2d::overload_name)
      .typed<quantized_max_pool2d::schema>();
}

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_quantized_max_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_quantized_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// quantized_max_pool3d::schema; throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool3d::schema> create_quantized_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool3d::name, quantized_max_pool3d::overload_name)
      .typed<quantized_max_pool3d::schema>();
}

// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_quantized_max_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor quantized_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_quantized_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
// Resolves the operator in the global dispatcher and returns a handle typed to
// max_pool3d::schema; findSchemaOrThrow throws if it is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d::schema> create_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d::name, max_pool3d::overload_name)
      .typed<max_pool3d::schema>();
}

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Handle is resolved once (thread-safe function-local static) and reused.
    static auto op = create_max_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
at::Tensor max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    // Same cached handle; dispatch uses the caller-provided DispatchKeySet.
    static auto op = create_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mean::schema> create_mean_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean::name, mean::overload_name)
      .typed<mean::schema>();
}

// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mean_typed_handle();
    return op.call(self, dtype);
}

// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mean_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mean_dtype_out::schema> create_mean_dtype_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_dtype_out::name, mean_dtype_out::overload_name)
      .typed<mean_dtype_out::schema>();
}

// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_dtype_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mean_dtype_out_typed_handle();
    return op.call(self, dtype, out);
}

// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mean_dtype_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, out);
}

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mean_dim::schema> create_mean_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_dim::name, mean_dim::overload_name)
      .typed<mean_dim::schema>();
}

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mean_dim_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mean_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mean_out::schema> create_mean_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_out::name, mean_out::overload_name)
      .typed<mean_out::schema>();
}

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mean_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mean_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mean_names_dim::schema> create_mean_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_names_dim::name, mean_names_dim::overload_name)
      .typed<mean_names_dim::schema>();
}

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mean_names_dim_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor mean_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mean_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mean_names_out::schema> create_mean_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mean_names_out::name, mean_names_out::overload_name)
      .typed<mean_names_out::schema>();
}

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_names_out::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mean_names_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mean_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mean_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmean::schema> create_nanmean_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmean::name, nanmean::overload_name)
      .typed<nanmean::schema>();
}

// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nanmean::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmean_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nanmean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmean_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmean_out::schema> create_nanmean_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmean_out::name, nanmean_out::overload_name)
      .typed<nanmean_out::schema>();
}

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanmean_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmean_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanmean_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmean_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::median(Tensor self) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<median::schema> create_median_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median::name, median::overload_name)
      .typed<median::schema>();
}

// aten::median(Tensor self) -> Tensor
at::Tensor median::call(const at::Tensor & self) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_median_typed_handle();
    return op.call(self);
}

// aten::median(Tensor self) -> Tensor
at::Tensor median::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_median_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<median_dim::schema> create_median_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_dim::name, median_dim::overload_name)
      .typed<median_dim::schema>();
}

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_median_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_median_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<median_dim_values::schema> create_median_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_dim_values::name, median_dim_values::overload_name)
      .typed<median_dim_values::schema>();
}

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_dim_values::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_median_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_median_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<median_names_dim::schema> create_median_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_names_dim::name, median_names_dim::overload_name)
      .typed<median_names_dim::schema>();
}

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_median_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> median_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_median_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<median_names_dim_values::schema> create_median_names_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_names_dim_values::name, median_names_dim_values::overload_name)
      .typed<median_names_dim_values::schema>();
}

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_names_dim_values::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_median_names_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> median_names_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_median_names_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

// aten::nanmedian(Tensor self) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian::schema> create_nanmedian_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian::name, nanmedian::overload_name)
      .typed<nanmedian::schema>();
}

// aten::nanmedian(Tensor self) -> Tensor
at::Tensor nanmedian::call(const at::Tensor & self) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmedian_typed_handle();
    return op.call(self);
}

// aten::nanmedian(Tensor self) -> Tensor
at::Tensor nanmedian::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmedian_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_dim::schema> create_nanmedian_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_dim::name, nanmedian_dim::overload_name)
      .typed<nanmedian_dim::schema>();
}

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmedian_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmedian_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_dim_values::schema> create_nanmedian_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_dim_values::name, nanmedian_dim_values::overload_name)
      .typed<nanmedian_dim_values::schema>();
}

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_dim_values::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmedian_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmedian_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_names_dim::schema> create_nanmedian_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_names_dim::name, nanmedian_names_dim::overload_name)
      .typed<nanmedian_names_dim::schema>();
}

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmedian_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmedian_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_names_dim_values::schema> create_nanmedian_names_dim_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_names_dim_values::name, nanmedian_names_dim_values::overload_name)
      .typed<nanmedian_names_dim_values::schema>();
}

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_names_dim_values::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_nanmedian_names_dim_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> nanmedian_names_dim_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_nanmedian_names_dim_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<min_dim::schema> create_min_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_dim::name, min_dim::overload_name)
      .typed<min_dim::schema>();
}

// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_dim::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_min_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_min_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<min_dim_min::schema> create_min_dim_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_dim_min::name, min_dim_min::overload_name)
      .typed<min_dim_min::schema>();
}

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_dim_min::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_min_dim_min_typed_handle();
    return op.call(self, dim, keepdim, min, min_indices);
}

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_dim_min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_min_dim_min_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices);
}

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<min_names_dim::schema> create_min_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_names_dim::name, min_names_dim::overload_name)
      .typed<min_names_dim::schema>();
}

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_names_dim::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_min_names_dim_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> min_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_min_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<min_names_dim_min::schema> create_min_names_dim_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_names_dim_min::name, min_names_dim_min::overload_name)
      .typed<min_names_dim_min::schema>();
}

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_names_dim_min::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_min_names_dim_min_typed_handle();
    return op.call(self, dim, keepdim, min, min_indices);
}

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> min_names_dim_min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_min_names_dim_min_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices);
}

// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<amin::schema> create_amin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(amin::name, amin::overload_name)
      .typed<amin::schema>();
}

// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
at::Tensor amin::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_amin_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
at::Tensor amin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_amin_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<amin_out::schema> create_amin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(amin_out::name, amin_out::overload_name)
      .typed<amin_out::schema>();
}

// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & amin_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_amin_out_typed_handle();
    return op.call(self, dim, keepdim, out);
}

// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & amin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_amin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution::schema> create__mps_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_convolution::name, _mps_convolution::overload_name)
      .typed<_mps_convolution::schema>();
}

// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor _mps_convolution::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create__mps_convolution_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups);
}

// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor _mps_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create__mps_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups);
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_backward::schema> create_mps_convolution_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mps_convolution_backward::name, mps_convolution_backward::overload_name)
      .typed<mps_convolution_backward::schema>();
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mps_convolution_backward_typed_handle();
    return op.call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mps_convolution_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask);
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
// Cold path: dispatcher schema lookup (findSchemaOrThrow fails loudly if the op is unregistered); C10_NOINLINE keeps it out of callers.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_convolution::schema> create_mkldnn_convolution_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_convolution::name, mkldnn_convolution::overload_name)
      .typed<mkldnn_convolution::schema>();
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor mkldnn_convolution::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    
    // One-time lookup, cached for the process (thread-safe C++11 magic static).
    static auto op = create_mkldnn_convolution_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups);
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor mkldnn_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    
    // Same cached handle; dispatch is driven by the caller-supplied key set.
    static auto op = create_mkldnn_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups);
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer::schema> create_mkldnn_rnn_layer_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mkldnn_rnn_layer::name, mkldnn_rnn_layer::overload_name).typed<mkldnn_rnn_layer::schema>();
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer::call(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_mkldnn_rnn_layer_typed_handle();
    return dispatch_handle.call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_mkldnn_rnn_layer_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}

// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer_backward::schema> create_mkldnn_rnn_layer_backward_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mkldnn_rnn_layer_backward::name, mkldnn_rnn_layer_backward::overload_name).typed<mkldnn_rnn_layer_backward::schema>();
}

// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward::call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_mkldnn_rnn_layer_backward_typed_handle();
    return dispatch_handle.call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
}

// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_mkldnn_rnn_layer_backward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
}

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm::schema> create_miopen_batch_norm_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_batch_norm::name, miopen_batch_norm::overload_name).typed<miopen_batch_norm::schema>();
}

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_batch_norm_typed_handle();
    return dispatch_handle.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_batch_norm_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm_backward::schema> create_miopen_batch_norm_backward_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_batch_norm_backward::name, miopen_batch_norm_backward::overload_name).typed<miopen_batch_norm_backward::schema>();
}

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_batch_norm_backward_typed_handle();
    return dispatch_handle.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
}

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_batch_norm_backward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution::schema> create_miopen_convolution_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_convolution::name, miopen_convolution::overload_name).typed<miopen_convolution::schema>();
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_convolution_typed_handle();
    return dispatch_handle.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_convolution_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_transpose::schema> create_miopen_convolution_transpose_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_convolution_transpose::name, miopen_convolution_transpose::overload_name).typed<miopen_convolution_transpose::schema>();
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution_transpose::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_convolution_transpose_typed_handle();
    return dispatch_handle.call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_convolution_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_convolution_transpose_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_depthwise_convolution::schema> create_miopen_depthwise_convolution_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_depthwise_convolution::name, miopen_depthwise_convolution::overload_name).typed<miopen_depthwise_convolution::schema>();
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_depthwise_convolution::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_depthwise_convolution_typed_handle();
    return dispatch_handle.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor
at::Tensor miopen_depthwise_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_depthwise_convolution_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_relu::schema> create_miopen_convolution_relu_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_convolution_relu::name, miopen_convolution_relu::overload_name).typed<miopen_convolution_relu::schema>();
}

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor miopen_convolution_relu::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_convolution_relu_typed_handle();
    return dispatch_handle.call(self, weight, bias, stride, padding, dilation, groups);
}

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor miopen_convolution_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_convolution_relu_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups);
}

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_add_relu::schema> create_miopen_convolution_add_relu_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_convolution_add_relu::name, miopen_convolution_add_relu::overload_name).typed<miopen_convolution_add_relu::schema>();
}

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor miopen_convolution_add_relu::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_convolution_add_relu_typed_handle();
    return dispatch_handle.call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor
at::Tensor miopen_convolution_add_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_convolution_add_relu_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn::schema> create_miopen_rnn_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_rnn::name, miopen_rnn::overload_name).typed<miopen_rnn::schema>();
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_rnn_typed_handle();
    return dispatch_handle.call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_rnn_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn_backward::schema> create_miopen_rnn_backward_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_rnn_backward::name, miopen_rnn_backward::overload_name).typed<miopen_rnn_backward::schema>();
}

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_miopen_rnn_backward_typed_handle();
    return dispatch_handle.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_miopen_rnn_backward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::mm(Tensor self, Tensor mat2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mm::schema> create_mm_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mm::name, mm::overload_name).typed<mm::schema>();
}

// aten::mm(Tensor self, Tensor mat2) -> Tensor
at::Tensor mm::call(const at::Tensor & self, const at::Tensor & mat2) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_mm_typed_handle();
    return dispatch_handle.call(self, mat2);
}

// aten::mm(Tensor self, Tensor mat2) -> Tensor
at::Tensor mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_mm_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, mat2);
}

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mm_out::schema> create_mm_out_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mm_out::name, mm_out::overload_name).typed<mm_out::schema>();
}

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mm_out::call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create_mm_out_typed_handle();
    return dispatch_handle.call(self, mat2, out);
}

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create_mm_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, mat2, out);
}

// aten::_int_mm(Tensor self, Tensor mat2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_int_mm::schema> create__int_mm_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_int_mm::name, _int_mm::overload_name).typed<_int_mm::schema>();
}

// aten::_int_mm(Tensor self, Tensor mat2) -> Tensor
at::Tensor _int_mm::call(const at::Tensor & self, const at::Tensor & mat2) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create__int_mm_typed_handle();
    return dispatch_handle.call(self, mat2);
}

// aten::_int_mm(Tensor self, Tensor mat2) -> Tensor
at::Tensor _int_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create__int_mm_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, mat2);
}

// aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_int_mm_out::schema> create__int_mm_out_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_int_mm_out::name, _int_mm_out::overload_name).typed<_int_mm_out::schema>();
}

// aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _int_mm_out::call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create__int_mm_out_typed_handle();
    return dispatch_handle.call(self, mat2, out);
}

// aten::_int_mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _int_mm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create__int_mm_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, mat2, out);
}

// aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convert_weight_to_int4pack::schema> create__convert_weight_to_int4pack_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_weight_to_int4pack::name, _convert_weight_to_int4pack::overload_name).typed<_convert_weight_to_int4pack::schema>();
}

// aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor
at::Tensor _convert_weight_to_int4pack::call(const at::Tensor & self, int64_t innerKTiles) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create__convert_weight_to_int4pack_typed_handle();
    return dispatch_handle.call(self, innerKTiles);
}

// aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor
at::Tensor _convert_weight_to_int4pack::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t innerKTiles) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create__convert_weight_to_int4pack_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, innerKTiles);
}

// aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_weight_int4pack_mm::schema> create__weight_int4pack_mm_typed_handle() {
  // Look up the registered schema in the singleton dispatcher and bind it to
  // this operator's statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_weight_int4pack_mm::name, _weight_int4pack_mm::overload_name).typed<_weight_int4pack_mm::schema>();
}

// aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
at::Tensor _weight_int4pack_mm::call(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
    // Resolve the dispatcher handle once; later calls reuse the cached handle.
    static auto dispatch_handle = create__weight_int4pack_mm_typed_handle();
    return dispatch_handle.call(self, mat2, qGroupSize, qScaleAndZeros);
}

// aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
at::Tensor _weight_int4pack_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
    // Re-enter the dispatcher using the caller-supplied dispatch key set.
    static auto dispatch_handle = create__weight_int4pack_mm_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, mat2, qGroupSize, qScaleAndZeros);
}

// aten::_convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_convert_weight_to_int4pack_for_cpu::schema> create__convert_weight_to_int4pack_for_cpu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convert_weight_to_int4pack_for_cpu::name, _convert_weight_to_int4pack_for_cpu::overload_name)
      .typed<_convert_weight_to_int4pack_for_cpu::schema>();
}

// aten::_convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
at::Tensor _convert_weight_to_int4pack_for_cpu::call(const at::Tensor & self, int64_t innerKTiles) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__convert_weight_to_int4pack_for_cpu_typed_handle();
    return op.call(self, innerKTiles);
}

// aten::_convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
at::Tensor _convert_weight_to_int4pack_for_cpu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t innerKTiles) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__convert_weight_to_int4pack_for_cpu_typed_handle();
    return op.redispatch(dispatchKeySet, self, innerKTiles);
}

// aten::_weight_int4pack_mm_for_cpu(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_weight_int4pack_mm_for_cpu::schema> create__weight_int4pack_mm_for_cpu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_int4pack_mm_for_cpu::name, _weight_int4pack_mm_for_cpu::overload_name)
      .typed<_weight_int4pack_mm_for_cpu::schema>();
}

// aten::_weight_int4pack_mm_for_cpu(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
at::Tensor _weight_int4pack_mm_for_cpu::call(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__weight_int4pack_mm_for_cpu_typed_handle();
    return op.call(self, mat2, qGroupSize, qScaleAndZeros);
}

// aten::_weight_int4pack_mm_for_cpu(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
at::Tensor _weight_int4pack_mm_for_cpu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__weight_int4pack_mm_for_cpu_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, qGroupSize, qScaleAndZeros);
}

// aten::_dyn_quant_pack_4bit_weight(Tensor weights, Tensor scales_zeros, Tensor? bias, int block_size, int in_features, int out_features) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_dyn_quant_pack_4bit_weight::schema> create__dyn_quant_pack_4bit_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dyn_quant_pack_4bit_weight::name, _dyn_quant_pack_4bit_weight::overload_name)
      .typed<_dyn_quant_pack_4bit_weight::schema>();
}

// aten::_dyn_quant_pack_4bit_weight(Tensor weights, Tensor scales_zeros, Tensor? bias, int block_size, int in_features, int out_features) -> Tensor
at::Tensor _dyn_quant_pack_4bit_weight::call(const at::Tensor & weights, const at::Tensor & scales_zeros, const ::std::optional<at::Tensor> & bias, int64_t block_size, int64_t in_features, int64_t out_features) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__dyn_quant_pack_4bit_weight_typed_handle();
    return op.call(weights, scales_zeros, bias, block_size, in_features, out_features);
}

// aten::_dyn_quant_pack_4bit_weight(Tensor weights, Tensor scales_zeros, Tensor? bias, int block_size, int in_features, int out_features) -> Tensor
at::Tensor _dyn_quant_pack_4bit_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weights, const at::Tensor & scales_zeros, const ::std::optional<at::Tensor> & bias, int64_t block_size, int64_t in_features, int64_t out_features) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__dyn_quant_pack_4bit_weight_typed_handle();
    return op.redispatch(dispatchKeySet, weights, scales_zeros, bias, block_size, in_features, out_features);
}

// aten::_dyn_quant_matmul_4bit(Tensor inp, Tensor packed_weights, int block_size, int in_features, int out_features) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_dyn_quant_matmul_4bit::schema> create__dyn_quant_matmul_4bit_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dyn_quant_matmul_4bit::name, _dyn_quant_matmul_4bit::overload_name)
      .typed<_dyn_quant_matmul_4bit::schema>();
}

// aten::_dyn_quant_matmul_4bit(Tensor inp, Tensor packed_weights, int block_size, int in_features, int out_features) -> Tensor
at::Tensor _dyn_quant_matmul_4bit::call(const at::Tensor & inp, const at::Tensor & packed_weights, int64_t block_size, int64_t in_features, int64_t out_features) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__dyn_quant_matmul_4bit_typed_handle();
    return op.call(inp, packed_weights, block_size, in_features, out_features);
}

// aten::_dyn_quant_matmul_4bit(Tensor inp, Tensor packed_weights, int block_size, int in_features, int out_features) -> Tensor
at::Tensor _dyn_quant_matmul_4bit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & inp, const at::Tensor & packed_weights, int64_t block_size, int64_t in_features, int64_t out_features) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__dyn_quant_matmul_4bit_typed_handle();
    return op.redispatch(dispatchKeySet, inp, packed_weights, block_size, in_features, out_features);
}

// aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_weight_int8pack_mm::schema> create__weight_int8pack_mm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_weight_int8pack_mm::name, _weight_int8pack_mm::overload_name)
      .typed<_weight_int8pack_mm::schema>();
}

// aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor
at::Tensor _weight_int8pack_mm::call(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__weight_int8pack_mm_typed_handle();
    return op.call(self, mat2, scales);
}

// aten::_weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor
at::Tensor _weight_int8pack_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__weight_int8pack_mm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, scales);
}

// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm::schema> create__sparse_mm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm::name, _sparse_mm::overload_name)
      .typed<_sparse_mm::schema>();
}

// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
at::Tensor _sparse_mm::call(const at::Tensor & sparse, const at::Tensor & dense) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__sparse_mm_typed_handle();
    return op.call(sparse, dense);
}

// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
at::Tensor _sparse_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__sparse_mm_typed_handle();
    return op.redispatch(dispatchKeySet, sparse, dense);
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm_reduce::schema> create__sparse_mm_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm_reduce::name, _sparse_mm_reduce::overload_name)
      .typed<_sparse_mm_reduce::schema>();
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
at::Tensor _sparse_mm_reduce::call(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__sparse_mm_reduce_typed_handle();
    return op.call(sparse, dense, reduce);
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
at::Tensor _sparse_mm_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__sparse_mm_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, sparse, dense, reduce);
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sparse_matmul::schema> create__sparse_sparse_matmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_sparse_matmul::name, _sparse_sparse_matmul::overload_name)
      .typed<_sparse_sparse_matmul::schema>();
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
at::Tensor _sparse_sparse_matmul::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create__sparse_sparse_matmul_typed_handle();
    return op.call(self, other);
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
at::Tensor _sparse_sparse_matmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create__sparse_sparse_matmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mode::schema> create_mode_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode::name, mode::overload_name)
      .typed<mode::schema>();
}

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode::call(const at::Tensor & self, int64_t dim, bool keepdim) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mode_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mode_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mode_values::schema> create_mode_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode_values::name, mode_values::overload_name)
      .typed<mode_values::schema>();
}

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_values::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mode_values_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mode_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mode_dimname::schema> create_mode_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode_dimname::name, mode_dimname::overload_name)
      .typed<mode_dimname::schema>();
}

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode_dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mode_dimname_typed_handle();
    return op.call(self, dim, keepdim);
}

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
::std::tuple<at::Tensor,at::Tensor> mode_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mode_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mode_dimname_out::schema> create_mode_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mode_dimname_out::name, mode_dimname_out::overload_name)
      .typed<mode_dimname_out::schema>();
}

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mode_dimname_out_typed_handle();
    return op.call(self, dim, keepdim, values, indices);
}

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
::std::tuple<at::Tensor &,at::Tensor &> mode_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mode_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
}

// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mul_Tensor::schema> create_mul_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_Tensor::name, mul_Tensor::overload_name)
      .typed<mul_Tensor::schema>();
}

// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor mul_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mul_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor mul_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mul_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mul__Tensor::schema> create_mul__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul__Tensor::name, mul__Tensor::overload_name)
      .typed<mul__Tensor::schema>();
}

// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & mul__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mul__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & mul__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mul__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mul_out::schema> create_mul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_out::name, mul_out::overload_name)
      .typed<mul_out::schema>();
}

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mul_out_typed_handle();
    return op.call(self, other, out);
}

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mul_Scalar::schema> create_mul_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul_Scalar::name, mul_Scalar::overload_name)
      .typed<mul_Scalar::schema>();
}

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor mul_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mul_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor mul_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mul_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mul__Scalar::schema> create_mul__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mul__Scalar::name, mul__Scalar::overload_name)
      .typed<mul__Scalar::schema>();
}

// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & mul__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mul__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & mul__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mul__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<multiply_Tensor::schema> create_multiply_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply_Tensor::name, multiply_Tensor::overload_name)
      .typed<multiply_Tensor::schema>();
}

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor multiply_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_multiply_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor multiply_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_multiply_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<multiply__Tensor::schema> create_multiply__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply__Tensor::name, multiply__Tensor::overload_name)
      .typed<multiply__Tensor::schema>();
}

// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & multiply__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_multiply__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & multiply__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_multiply__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<multiply_out::schema> create_multiply_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply_out::name, multiply_out::overload_name)
      .typed<multiply_out::schema>();
}

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multiply_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_multiply_out_typed_handle();
    return op.call(self, other, out);
}

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multiply_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_multiply_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<multiply_Scalar::schema> create_multiply_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply_Scalar::name, multiply_Scalar::overload_name)
      .typed<multiply_Scalar::schema>();
}

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor multiply_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_multiply_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor multiply_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_multiply_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<multiply__Scalar::schema> create_multiply__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multiply__Scalar::name, multiply__Scalar::overload_name)
      .typed<multiply__Scalar::schema>();
}

// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & multiply__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_multiply__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & multiply__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_multiply__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::mv(Tensor self, Tensor vec) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mv::schema> create_mv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mv::name, mv::overload_name)
      .typed<mv::schema>();
}

// aten::mv(Tensor self, Tensor vec) -> Tensor
at::Tensor mv::call(const at::Tensor & self, const at::Tensor & vec) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mv_typed_handle();
    return op.call(self, vec);
}

// aten::mv(Tensor self, Tensor vec) -> Tensor
at::Tensor mv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mv_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec);
}

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mv_out::schema> create_mv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mv_out::name, mv_out::overload_name)
      .typed<mv_out::schema>();
}

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mv_out::call(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mv_out_typed_handle();
    return op.call(self, vec, out);
}

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, vec, out);
}

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mvlgamma_out::schema> create_mvlgamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mvlgamma_out::name, mvlgamma_out::overload_name)
      .typed<mvlgamma_out::schema>();
}

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mvlgamma_out::call(const at::Tensor & self, int64_t p, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mvlgamma_out_typed_handle();
    return op.call(self, p, out);
}

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mvlgamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mvlgamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

// aten::mvlgamma(Tensor self, int p) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mvlgamma::schema> create_mvlgamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mvlgamma::name, mvlgamma::overload_name)
      .typed<mvlgamma::schema>();
}

// aten::mvlgamma(Tensor self, int p) -> Tensor
at::Tensor mvlgamma::call(const at::Tensor & self, int64_t p) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mvlgamma_typed_handle();
    return op.call(self, p);
}

// aten::mvlgamma(Tensor self, int p) -> Tensor
at::Tensor mvlgamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mvlgamma_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<mvlgamma_::schema> create_mvlgamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mvlgamma_::name, mvlgamma_::overload_name)
      .typed<mvlgamma_::schema>();
}

// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
at::Tensor & mvlgamma_::call(at::Tensor & self, int64_t p) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_mvlgamma__typed_handle();
    return op.call(self, p);
}

// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
at::Tensor & mvlgamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_mvlgamma__typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<narrow_copy::schema> create_narrow_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow_copy::name, narrow_copy::overload_name)
      .typed<narrow_copy::schema>();
}

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
at::Tensor narrow_copy::call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_narrow_copy_typed_handle();
    return op.call(self, dim, start, length);
}

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
at::Tensor narrow_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_narrow_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length);
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<narrow_copy_out::schema> create_narrow_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow_copy_out::name, narrow_copy_out::overload_name)
      .typed<narrow_copy_out::schema>();
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & narrow_copy_out::call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_narrow_copy_out_typed_handle();
    return op.call(self, dim, start, length, out);
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & narrow_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_narrow_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length, out);
}

// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<narrow::schema> create_narrow_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow::name, narrow::overload_name)
      .typed<narrow::schema>();
}

// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
at::Tensor narrow::call(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_narrow_typed_handle();
    return op.call(self, dim, start, length);
}

// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
at::Tensor narrow::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_narrow_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length);
}

// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
// Resolves this op's schema in the c10 dispatcher and returns a typed handle.
// C10_NOINLINE keeps the cold lookup path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<narrow_Tensor::schema> create_narrow_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(narrow_Tensor::name, narrow_Tensor::overload_name)
      .typed<narrow_Tensor::schema>();
}

// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
at::Tensor narrow_Tensor::call(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
    // Handle is resolved once (thread-safe static init) and reused thereafter.
    static auto op = create_narrow_Tensor_typed_handle();
    return op.call(self, dim, start, length);
}

// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
at::Tensor narrow_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
    // Like call(), but dispatch resumes from the caller-provided key set.
    static auto op = create_narrow_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, length);
}

// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm::schema> create_native_batch_norm_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(native_batch_norm::name, native_batch_norm::overload_name)
      .typed<native_batch_norm::schema>();
}

// Full-dispatch entry point for aten::native_batch_norm; the operator
// handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
    static auto op = create_native_batch_norm_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
    static auto op = create_native_batch_norm_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm_out::schema> create_native_batch_norm_out_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(native_batch_norm_out::name, native_batch_norm_out::overload_name)
      .typed<native_batch_norm_out::schema>();
}

// Full-dispatch entry point for aten::native_batch_norm.out; the operator
// handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    static auto op = create_native_batch_norm_out_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    static auto op = create_native_batch_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit::schema> create__native_batch_norm_legit_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_native_batch_norm_legit::name, _native_batch_norm_legit::overload_name)
      .typed<_native_batch_norm_legit::schema>();
}

// Full-dispatch entry point for aten::_native_batch_norm_legit; the operator
// handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
    static auto op = create__native_batch_norm_legit_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
    static auto op = create__native_batch_norm_legit_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_no_training::schema> create__native_batch_norm_legit_no_training_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_native_batch_norm_legit_no_training::name, _native_batch_norm_legit_no_training::overload_name)
      .typed<_native_batch_norm_legit_no_training::schema>();
}

// Full-dispatch entry point for aten::_native_batch_norm_legit_no_training;
// the operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
    static auto op = create__native_batch_norm_legit_no_training_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
    static auto op = create__native_batch_norm_legit_no_training_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_out::schema> create__native_batch_norm_legit_out_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_native_batch_norm_legit_out::name, _native_batch_norm_legit_out::overload_name)
      .typed<_native_batch_norm_legit_out::schema>();
}

// Full-dispatch entry point for aten::_native_batch_norm_legit.out; the
// operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    static auto op = create__native_batch_norm_legit_out_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    static auto op = create__native_batch_norm_legit_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_no_stats::schema> create__native_batch_norm_legit_no_stats_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_native_batch_norm_legit_no_stats::name, _native_batch_norm_legit_no_stats::overload_name)
      .typed<_native_batch_norm_legit_no_stats::schema>();
}

// Full-dispatch entry point for aten::_native_batch_norm_legit.no_stats; the
// operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
    static auto op = create__native_batch_norm_legit_no_stats_typed_handle();
    return op.call(input, weight, bias, training, momentum, eps);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
    static auto op = create__native_batch_norm_legit_no_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps);
}

// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_no_stats_out::schema> create__native_batch_norm_legit_no_stats_out_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_native_batch_norm_legit_no_stats_out::name, _native_batch_norm_legit_no_stats_out::overload_name)
      .typed<_native_batch_norm_legit_no_stats_out::schema>();
}

// Full-dispatch entry point for aten::_native_batch_norm_legit.no_stats_out;
// the operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_stats_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    static auto op = create__native_batch_norm_legit_no_stats_out_typed_handle();
    return op.call(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
    static auto op = create__native_batch_norm_legit_no_stats_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
}

// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_stats::schema> create_batch_norm_stats_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_stats::name, batch_norm_stats::overload_name)
      .typed<batch_norm_stats::schema>();
}

// Full-dispatch entry point for aten::batch_norm_stats; the operator handle
// is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor> batch_norm_stats::call(const at::Tensor & input, double eps) {
    static auto op = create_batch_norm_stats_typed_handle();
    return op.call(input, eps);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor> batch_norm_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps) {
    static auto op = create_batch_norm_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, eps);
}

// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_elemt::schema> create_batch_norm_elemt_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_elemt::name, batch_norm_elemt::overload_name)
      .typed<batch_norm_elemt::schema>();
}

// Full-dispatch entry point for aten::batch_norm_elemt; the operator handle
// is resolved lazily and cached in a function-local static.
at::Tensor batch_norm_elemt::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
    static auto op = create_batch_norm_elemt_typed_handle();
    return op.call(input, weight, bias, mean, invstd, eps);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
at::Tensor batch_norm_elemt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
    static auto op = create_batch_norm_elemt_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps);
}

// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_elemt_out::schema> create_batch_norm_elemt_out_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_elemt_out::name, batch_norm_elemt_out::overload_name)
      .typed<batch_norm_elemt_out::schema>();
}

// Full-dispatch entry point for aten::batch_norm_elemt.out; the operator
// handle is resolved lazily and cached in a function-local static.
at::Tensor & batch_norm_elemt_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {
    static auto op = create_batch_norm_elemt_out_typed_handle();
    return op.call(input, weight, bias, mean, invstd, eps, out);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
at::Tensor & batch_norm_elemt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {
    static auto op = create_batch_norm_elemt_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps, out);
}

// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats::schema> create_batch_norm_gather_stats_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_gather_stats::name, batch_norm_gather_stats::overload_name)
      .typed<batch_norm_gather_stats::schema>();
}

// Full-dispatch entry point for aten::batch_norm_gather_stats; the operator
// handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
    static auto op = create_batch_norm_gather_stats_typed_handle();
    return op.call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
    static auto op = create_batch_norm_gather_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count);
}

// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats_with_counts::schema> create_batch_norm_gather_stats_with_counts_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_gather_stats_with_counts::name, batch_norm_gather_stats_with_counts::overload_name)
      .typed<batch_norm_gather_stats_with_counts::schema>();
}

// Full-dispatch entry point for aten::batch_norm_gather_stats_with_counts;
// the operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
    static auto op = create_batch_norm_gather_stats_with_counts_typed_handle();
    return op.call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
    static auto op = create_batch_norm_gather_stats_with_counts_typed_handle();
    return op.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts);
}

// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm_backward::schema> create_native_batch_norm_backward_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(native_batch_norm_backward::name, native_batch_norm_backward::overload_name)
      .typed<native_batch_norm_backward::schema>();
}

// Full-dispatch entry point for aten::native_batch_norm_backward; the
// operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
    static auto op = create_native_batch_norm_backward_typed_handle();
    return op.call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
    static auto op = create_native_batch_norm_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
}

// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_reduce::schema> create_batch_norm_backward_reduce_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_backward_reduce::name, batch_norm_backward_reduce::overload_name)
      .typed<batch_norm_backward_reduce::schema>();
}

// Full-dispatch entry point for aten::batch_norm_backward_reduce; the
// operator handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
    static auto op = create_batch_norm_backward_reduce_typed_handle();
    return op.call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
    static auto op = create_batch_norm_backward_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
}

// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_elemt::schema> create_batch_norm_backward_elemt_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_backward_elemt::name, batch_norm_backward_elemt::overload_name)
      .typed<batch_norm_backward_elemt::schema>();
}

// Full-dispatch entry point for aten::batch_norm_backward_elemt; the
// operator handle is resolved lazily and cached in a function-local static.
at::Tensor batch_norm_backward_elemt::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) {
    static auto op = create_batch_norm_backward_elemt_typed_handle();
    return op.call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
at::Tensor batch_norm_backward_elemt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) {
    static auto op = create_batch_norm_backward_elemt_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}

// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_update_stats::schema> create_batch_norm_update_stats_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(batch_norm_update_stats::name, batch_norm_update_stats::overload_name)
      .typed<batch_norm_update_stats::schema>();
}

// Full-dispatch entry point for aten::batch_norm_update_stats; the operator
// handle is resolved lazily and cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats::call(const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum) {
    static auto op = create_batch_norm_update_stats_typed_handle();
    return op.call(input, running_mean, running_var, momentum);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum) {
    static auto op = create_batch_norm_update_stats_typed_handle();
    return op.redispatch(dispatchKeySet, input, running_mean, running_var, momentum);
}

// aten::is_vulkan_available() -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_vulkan_available::schema> create_is_vulkan_available_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(is_vulkan_available::name, is_vulkan_available::overload_name)
      .typed<is_vulkan_available::schema>();
}

// Full-dispatch entry point for aten::is_vulkan_available; the operator
// handle is resolved lazily and cached in a function-local static.
bool is_vulkan_available::call() {
    static auto op = create_is_vulkan_available_typed_handle();
    return op.call();
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
bool is_vulkan_available::redispatch(c10::DispatchKeySet dispatchKeySet) {
    static auto op = create_is_vulkan_available_typed_handle();
    return op.redispatch(dispatchKeySet);
}

// aten::_nnpack_available() -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_nnpack_available::schema> create__nnpack_available_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_nnpack_available::name, _nnpack_available::overload_name)
      .typed<_nnpack_available::schema>();
}

// Full-dispatch entry point for aten::_nnpack_available; the operator handle
// is resolved lazily and cached in a function-local static.
bool _nnpack_available::call() {
    static auto op = create__nnpack_available_typed_handle();
    return op.call();
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
bool _nnpack_available::redispatch(c10::DispatchKeySet dispatchKeySet) {
    static auto op = create__nnpack_available_typed_handle();
    return op.redispatch(dispatchKeySet);
}

// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nnpack_spatial_convolution::schema> create__nnpack_spatial_convolution_typed_handle() {
  // Resolve the registered schema once and hand back a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_nnpack_spatial_convolution::name, _nnpack_spatial_convolution::overload_name)
      .typed<_nnpack_spatial_convolution::schema>();
}

// Full-dispatch entry point for aten::_nnpack_spatial_convolution; the
// operator handle is resolved lazily and cached in a function-local static.
at::Tensor _nnpack_spatial_convolution::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride) {
    static auto op = create__nnpack_spatial_convolution_typed_handle();
    return op.call(input, weight, bias, padding, stride);
}

// Redispatch entry point: forwards to op.redispatch with the
// caller-supplied dispatch key set.
at::Tensor _nnpack_spatial_convolution::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride) {
    static auto op = create__nnpack_spatial_convolution_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, padding, stride);
}

// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ones_names::schema> create_ones_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_names::name, ones_names::overload_name)
      .typed<ones_names::schema>();
}

// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones_names::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_ones_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_ones_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ones::schema> create_ones_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones::name, ones::overload_name)
      .typed<ones::schema>();
}

// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_ones_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor ones::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_ones_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// NOTE(review): @generated torchgen dispatcher stubs — do not hand-edit. Pattern:
// a C10_NOINLINE creator does the cold Dispatcher schema lookup; call()/redispatch()
// cache the typed handle in a function-local static (thread-safe once-init per
// C++11) and forward arguments verbatim, redispatch() with an explicit key set.
// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ones_out::schema> create_ones_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_out::name, ones_out::overload_name)
      .typed<ones_out::schema>();
}

// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_ones_out_typed_handle();
    return op.call(size, out);
}

// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_ones_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ones_like::schema> create_ones_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ones_like::name, ones_like::overload_name)
      .typed<ones_like::schema>();
}

// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor ones_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_ones_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor ones_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_ones_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pairwise_distance::schema> create_pairwise_distance_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pairwise_distance::name, pairwise_distance::overload_name)
      .typed<pairwise_distance::schema>();
}

// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
at::Tensor pairwise_distance::call(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
    
    static auto op = create_pairwise_distance_typed_handle();
    return op.call(x1, x2, p, eps, keepdim);
}

// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
at::Tensor pairwise_distance::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
    
    static auto op = create_pairwise_distance_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, eps, keepdim);
}

// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cdist::schema> create_cdist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cdist::name, cdist::overload_name)
      .typed<cdist::schema>();
}

// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
at::Tensor cdist::call(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
    
    static auto op = create_cdist_typed_handle();
    return op.call(x1, x2, p, compute_mode);
}

// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
at::Tensor cdist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
    
    static auto op = create_cdist_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, compute_mode);
}

// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_euclidean_dist::schema> create__euclidean_dist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_euclidean_dist::name, _euclidean_dist::overload_name)
      .typed<_euclidean_dist::schema>();
}

// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
at::Tensor _euclidean_dist::call(const at::Tensor & x1, const at::Tensor & x2) {
    
    static auto op = create__euclidean_dist_typed_handle();
    return op.call(x1, x2);
}

// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
at::Tensor _euclidean_dist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2) {
    
    static auto op = create__euclidean_dist_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2);
}

// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_forward::schema> create__cdist_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cdist_forward::name, _cdist_forward::overload_name)
      .typed<_cdist_forward::schema>();
}

// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
at::Tensor _cdist_forward::call(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
    
    static auto op = create__cdist_forward_typed_handle();
    return op.call(x1, x2, p, compute_mode);
}

// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
at::Tensor _cdist_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode) {
    
    static auto op = create__cdist_forward_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, p, compute_mode);
}

// NOTE(review): @generated torchgen dispatcher stubs — do not hand-edit. Pattern:
// a C10_NOINLINE creator does the cold Dispatcher schema lookup; call()/redispatch()
// cache the typed handle in a function-local static (thread-safe once-init per
// C++11) and forward arguments verbatim, redispatch() with an explicit key set.
// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_backward::schema> create__cdist_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cdist_backward::name, _cdist_backward::overload_name)
      .typed<_cdist_backward::schema>();
}

// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
at::Tensor _cdist_backward::call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    
    static auto op = create__cdist_backward_typed_handle();
    return op.call(grad, x1, x2, p, cdist);
}

// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
at::Tensor _cdist_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
    
    static auto op = create__cdist_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, x1, x2, p, cdist);
}

// aten::pdist(Tensor self, float p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pdist::schema> create_pdist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pdist::name, pdist::overload_name)
      .typed<pdist::schema>();
}

// aten::pdist(Tensor self, float p=2) -> Tensor
at::Tensor pdist::call(const at::Tensor & self, double p) {
    
    static auto op = create_pdist_typed_handle();
    return op.call(self, p);
}

// aten::pdist(Tensor self, float p=2) -> Tensor
at::Tensor pdist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p) {
    
    static auto op = create_pdist_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_forward::schema> create__pdist_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pdist_forward::name, _pdist_forward::overload_name)
      .typed<_pdist_forward::schema>();
}

// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
at::Tensor _pdist_forward::call(const at::Tensor & self, double p) {
    
    static auto op = create__pdist_forward_typed_handle();
    return op.call(self, p);
}

// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
at::Tensor _pdist_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p) {
    
    static auto op = create__pdist_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_backward::schema> create__pdist_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pdist_backward::name, _pdist_backward::overload_name)
      .typed<_pdist_backward::schema>();
}

// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
at::Tensor _pdist_backward::call(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
    
    static auto op = create__pdist_backward_typed_handle();
    return op.call(grad, self, p, pdist);
}

// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
at::Tensor _pdist_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
    
    static auto op = create__pdist_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, p, pdist);
}

// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cosine_similarity::schema> create_cosine_similarity_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cosine_similarity::name, cosine_similarity::overload_name)
      .typed<cosine_similarity::schema>();
}

// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
at::Tensor cosine_similarity::call(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
    
    static auto op = create_cosine_similarity_typed_handle();
    return op.call(x1, x2, dim, eps);
}

// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
at::Tensor cosine_similarity::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
    
    static auto op = create_cosine_similarity_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, dim, eps);
}

// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<permute::schema> create_permute_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(permute::name, permute::overload_name)
      .typed<permute::schema>();
}

// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
at::Tensor permute::call(const at::Tensor & self, at::IntArrayRef dims) {
    
    static auto op = create_permute_typed_handle();
    return op.call(self, dims);
}

// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
at::Tensor permute::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
    
    static auto op = create_permute_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims);
}

// NOTE(review): @generated torchgen dispatcher stubs — do not hand-edit. Pattern:
// a C10_NOINLINE creator does the cold Dispatcher schema lookup; call()/redispatch()
// cache the typed handle in a function-local static (thread-safe once-init per
// C++11) and forward arguments verbatim, redispatch() with an explicit key set.
// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<movedim_intlist::schema> create_movedim_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(movedim_intlist::name, movedim_intlist::overload_name)
      .typed<movedim_intlist::schema>();
}

// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor movedim_intlist::call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    
    static auto op = create_movedim_intlist_typed_handle();
    return op.call(self, source, destination);
}

// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor movedim_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    
    static auto op = create_movedim_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<movedim_int::schema> create_movedim_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(movedim_int::name, movedim_int::overload_name)
      .typed<movedim_int::schema>();
}

// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor movedim_int::call(const at::Tensor & self, int64_t source, int64_t destination) {
    
    static auto op = create_movedim_int_typed_handle();
    return op.call(self, source, destination);
}

// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor movedim_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) {
    
    static auto op = create_movedim_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<moveaxis_intlist::schema> create_moveaxis_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(moveaxis_intlist::name, moveaxis_intlist::overload_name)
      .typed<moveaxis_intlist::schema>();
}

// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor moveaxis_intlist::call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    
    static auto op = create_moveaxis_intlist_typed_handle();
    return op.call(self, source, destination);
}

// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
at::Tensor moveaxis_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
    
    static auto op = create_moveaxis_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<moveaxis_int::schema> create_moveaxis_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(moveaxis_int::name, moveaxis_int::overload_name)
      .typed<moveaxis_int::schema>();
}

// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor moveaxis_int::call(const at::Tensor & self, int64_t source, int64_t destination) {
    
    static auto op = create_moveaxis_int_typed_handle();
    return op.call(self, source, destination);
}

// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
at::Tensor moveaxis_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) {
    
    static auto op = create_moveaxis_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, source, destination);
}

// aten::numpy_T(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<numpy_T::schema> create_numpy_T_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(numpy_T::name, numpy_T::overload_name)
      .typed<numpy_T::schema>();
}

// aten::numpy_T(Tensor(a) self) -> Tensor(a)
at::Tensor numpy_T::call(const at::Tensor & self) {
    
    static auto op = create_numpy_T_typed_handle();
    return op.call(self);
}

// aten::numpy_T(Tensor(a) self) -> Tensor(a)
at::Tensor numpy_T::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_numpy_T_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::matrix_H(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<matrix_H::schema> create_matrix_H_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matrix_H::name, matrix_H::overload_name)
      .typed<matrix_H::schema>();
}

// aten::matrix_H(Tensor(a) self) -> Tensor(a)
at::Tensor matrix_H::call(const at::Tensor & self) {
    
    static auto op = create_matrix_H_typed_handle();
    return op.call(self);
}

// aten::matrix_H(Tensor(a) self) -> Tensor(a)
at::Tensor matrix_H::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_matrix_H_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// NOTE(review): @generated torchgen dispatcher stubs — do not hand-edit. Pattern:
// a C10_NOINLINE creator does the cold Dispatcher schema lookup; call()/redispatch()
// cache the typed handle in a function-local static (thread-safe once-init per
// C++11) and forward arguments verbatim, redispatch() with an explicit key set.
// aten::mT(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<mT::schema> create_mT_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mT::name, mT::overload_name)
      .typed<mT::schema>();
}

// aten::mT(Tensor(a) self) -> Tensor(a)
at::Tensor mT::call(const at::Tensor & self) {
    
    static auto op = create_mT_typed_handle();
    return op.call(self);
}

// aten::mT(Tensor(a) self) -> Tensor(a)
at::Tensor mT::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_mT_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::mH(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<mH::schema> create_mH_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mH::name, mH::overload_name)
      .typed<mH::schema>();
}

// aten::mH(Tensor(a) self) -> Tensor(a)
at::Tensor mH::call(const at::Tensor & self) {
    
    static auto op = create_mH_typed_handle();
    return op.call(self);
}

// aten::mH(Tensor(a) self) -> Tensor(a)
at::Tensor mH::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_mH_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::adjoint(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<adjoint::schema> create_adjoint_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adjoint::name, adjoint::overload_name)
      .typed<adjoint::schema>();
}

// aten::adjoint(Tensor(a) self) -> Tensor(a)
at::Tensor adjoint::call(const at::Tensor & self) {
    
    static auto op = create_adjoint_typed_handle();
    return op.call(self);
}

// aten::adjoint(Tensor(a) self) -> Tensor(a)
at::Tensor adjoint::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_adjoint_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pixel_shuffle::schema> create_pixel_shuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pixel_shuffle::name, pixel_shuffle::overload_name)
      .typed<pixel_shuffle::schema>();
}

// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
at::Tensor pixel_shuffle::call(const at::Tensor & self, int64_t upscale_factor) {
    
    static auto op = create_pixel_shuffle_typed_handle();
    return op.call(self, upscale_factor);
}

// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
at::Tensor pixel_shuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor) {
    
    static auto op = create_pixel_shuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, upscale_factor);
}

// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pixel_unshuffle::schema> create_pixel_unshuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pixel_unshuffle::name, pixel_unshuffle::overload_name)
      .typed<pixel_unshuffle::schema>();
}

// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
at::Tensor pixel_unshuffle::call(const at::Tensor & self, int64_t downscale_factor) {
    
    static auto op = create_pixel_unshuffle_typed_handle();
    return op.call(self, downscale_factor);
}

// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
at::Tensor pixel_unshuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor) {
    
    static auto op = create_pixel_unshuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, downscale_factor);
}

// aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<channel_shuffle::schema> create_channel_shuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(channel_shuffle::name, channel_shuffle::overload_name)
      .typed<channel_shuffle::schema>();
}

// aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor
at::Tensor channel_shuffle::call(const at::Tensor & self, c10::SymInt groups) {
    
    static auto op = create_channel_shuffle_typed_handle();
    return op.call(self, groups);
}

// aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor
at::Tensor channel_shuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups) {
    
    static auto op = create_channel_shuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, groups);
}

// NOTE(review): @generated torchgen dispatcher stubs — do not hand-edit. Pattern:
// a C10_NOINLINE creator does the cold Dispatcher schema lookup; call()/redispatch()
// cache the typed handle in a function-local static (thread-safe once-init per
// C++11) and forward arguments verbatim, redispatch() with an explicit key set.
// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_channel_shuffle::schema> create_native_channel_shuffle_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_channel_shuffle::name, native_channel_shuffle::overload_name)
      .typed<native_channel_shuffle::schema>();
}

// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
at::Tensor native_channel_shuffle::call(const at::Tensor & self, c10::SymInt groups) {
    
    static auto op = create_native_channel_shuffle_typed_handle();
    return op.call(self, groups);
}

// aten::native_channel_shuffle(Tensor self, SymInt groups) -> Tensor
at::Tensor native_channel_shuffle::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups) {
    
    static auto op = create_native_channel_shuffle_typed_handle();
    return op.redispatch(dispatchKeySet, self, groups);
}

// aten::is_pinned(Tensor self, Device? device=None) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_pinned::schema> create_is_pinned_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_pinned::name, is_pinned::overload_name)
      .typed<is_pinned::schema>();
}

// aten::is_pinned(Tensor self, Device? device=None) -> bool
bool is_pinned::call(const at::Tensor & self, ::std::optional<at::Device> device) {
    
    static auto op = create_is_pinned_typed_handle();
    return op.call(self, device);
}

// aten::is_pinned(Tensor self, Device? device=None) -> bool
bool is_pinned::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Device> device) {
    
    static auto op = create_is_pinned_typed_handle();
    return op.redispatch(dispatchKeySet, self, device);
}

// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<pin_memory::schema> create_pin_memory_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pin_memory::name, pin_memory::overload_name)
      .typed<pin_memory::schema>();
}

// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
at::Tensor pin_memory::call(const at::Tensor & self, ::std::optional<at::Device> device) {
    
    static auto op = create_pin_memory_typed_handle();
    return op.call(self, device);
}

// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
at::Tensor pin_memory::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Device> device) {
    
    static auto op = create_pin_memory_typed_handle();
    return op.redispatch(dispatchKeySet, self, device);
}

// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pin_memory::schema> create__pin_memory_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_pin_memory::name, _pin_memory::overload_name)
      .typed<_pin_memory::schema>();
}

// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
at::Tensor _pin_memory::call(const at::Tensor & self, ::std::optional<at::Device> device) {
    
    static auto op = create__pin_memory_typed_handle();
    return op.call(self, device);
}

// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
at::Tensor _pin_memory::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Device> device) {
    
    static auto op = create__pin_memory_typed_handle();
    return op.redispatch(dispatchKeySet, self, device);
}

// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pinverse::schema> create_pinverse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pinverse::name, pinverse::overload_name)
      .typed<pinverse::schema>();
}

// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
at::Tensor pinverse::call(const at::Tensor & self, double rcond) {
    
    static auto op = create_pinverse_typed_handle();
    return op.call(self, rcond);
}

// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
at::Tensor pinverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond) {
    
    static auto op = create_pinverse_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond);
}

// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<poisson_nll_loss::schema> create_poisson_nll_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(poisson_nll_loss::name, poisson_nll_loss::overload_name)
      .typed<poisson_nll_loss::schema>();
}

// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
at::Tensor poisson_nll_loss::call(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
    
    static auto op = create_poisson_nll_loss_typed_handle();
    return op.call(input, target, log_input, full, eps, reduction);
}

// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
at::Tensor poisson_nll_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
    
    static auto op = create_poisson_nll_loss_typed_handle();
    return op.redispatch(dispatchKeySet, input, target, log_input, full, eps, reduction);
}

// aten::rad2deg(Tensor self) -> Tensor
// One-time schema lookup for aten::rad2deg; kept out-of-line (C10_NOINLINE)
// so only the cheap cached-handle path is inlined into callers.
static C10_NOINLINE c10::TypedOperatorHandle<rad2deg::schema> create_rad2deg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rad2deg::name, rad2deg::overload_name)
      .typed<rad2deg::schema>();
}

// aten::rad2deg(Tensor self) -> Tensor
// Entry point: dispatches through the lazily-cached operator handle.
at::Tensor rad2deg::call(const at::Tensor & self) {
    
    static auto op = create_rad2deg_typed_handle();
    return op.call(self);
}

// aten::rad2deg(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor rad2deg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_rad2deg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
// One-time schema lookup for the in-place variant aten::rad2deg_.
static C10_NOINLINE c10::TypedOperatorHandle<rad2deg_::schema> create_rad2deg__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rad2deg_::name, rad2deg_::overload_name)
      .typed<rad2deg_::schema>();
}

// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
// In-place entry point: mutates `self` and returns it by reference.
at::Tensor & rad2deg_::call(at::Tensor & self) {
    
    static auto op = create_rad2deg__typed_handle();
    return op.call(self);
}

// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & rad2deg_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_rad2deg__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for the out-variant aten::rad2deg.out.
static C10_NOINLINE c10::TypedOperatorHandle<rad2deg_out::schema> create_rad2deg_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rad2deg_out::name, rad2deg_out::overload_name)
      .typed<rad2deg_out::schema>();
}

// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point: writes into `out` and returns it by reference.
at::Tensor & rad2deg_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_rad2deg_out_typed_handle();
    return op.call(self, out);
}

// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & rad2deg_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_rad2deg_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::deg2rad(Tensor self) -> Tensor
// One-time schema lookup for aten::deg2rad; kept out-of-line (C10_NOINLINE)
// so only the cheap cached-handle path is inlined into callers.
static C10_NOINLINE c10::TypedOperatorHandle<deg2rad::schema> create_deg2rad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(deg2rad::name, deg2rad::overload_name)
      .typed<deg2rad::schema>();
}

// aten::deg2rad(Tensor self) -> Tensor
// Entry point: dispatches through the lazily-cached operator handle.
at::Tensor deg2rad::call(const at::Tensor & self) {
    
    static auto op = create_deg2rad_typed_handle();
    return op.call(self);
}

// aten::deg2rad(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor deg2rad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_deg2rad_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
// One-time schema lookup for the in-place variant aten::deg2rad_.
static C10_NOINLINE c10::TypedOperatorHandle<deg2rad_::schema> create_deg2rad__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(deg2rad_::name, deg2rad_::overload_name)
      .typed<deg2rad_::schema>();
}

// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
// In-place entry point: mutates `self` and returns it by reference.
at::Tensor & deg2rad_::call(at::Tensor & self) {
    
    static auto op = create_deg2rad__typed_handle();
    return op.call(self);
}

// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & deg2rad_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_deg2rad__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for the out-variant aten::deg2rad.out.
static C10_NOINLINE c10::TypedOperatorHandle<deg2rad_out::schema> create_deg2rad_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(deg2rad_out::name, deg2rad_out::overload_name)
      .typed<deg2rad_out::schema>();
}

// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point: writes into `out` and returns it by reference.
at::Tensor & deg2rad_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_deg2rad_out_typed_handle();
    return op.call(self, out);
}

// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & deg2rad_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_deg2rad_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::scalar_tensor; kept out-of-line
// (C10_NOINLINE) so only the cheap cached-handle path is inlined.
static C10_NOINLINE c10::TypedOperatorHandle<scalar_tensor::schema> create_scalar_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scalar_tensor::name, scalar_tensor::overload_name)
      .typed<scalar_tensor::schema>();
}

// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point: the optional TensorOptions components (dtype/layout/device/
// pin_memory) are forwarded individually, matching the flattened schema.
at::Tensor scalar_tensor::call(const at::Scalar & s, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_scalar_tensor_typed_handle();
    return op.call(s, dtype, layout, device, pin_memory);
}

// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor scalar_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_scalar_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, s, dtype, layout, device, pin_memory);
}

// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::rand.names; kept out-of-line (C10_NOINLINE)
// so only the cheap cached-handle path below is inlined. The same pattern
// repeats for every rand overload in this section.
static C10_NOINLINE c10::TypedOperatorHandle<rand_names::schema> create_rand_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_names::name, rand_names::overload_name)
      .typed<rand_names::schema>();
}

// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point: dispatches through the lazily-cached operator handle
// (thread-safe C++11 static initialization on first use).
at::Tensor rand_names::call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor rand_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::rand.generator_with_names.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator_with_names::schema> create_rand_generator_with_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_generator_with_names::name, rand_generator_with_names::overload_name)
      .typed<rand_generator_with_names::schema>();
}

// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point for the generator+names overload.
at::Tensor rand_generator_with_names::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_generator_with_names_typed_handle();
    return op.call(size, generator, names, dtype, layout, device, pin_memory);
}

// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor rand_generator_with_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_generator_with_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory);
}

// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for the default aten::rand overload.
static C10_NOINLINE c10::TypedOperatorHandle<rand::schema> create_rand_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand::name, rand::overload_name)
      .typed<rand::schema>();
}

// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point for the default rand overload.
at::Tensor rand::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor rand::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::rand.generator.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator::schema> create_rand_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_generator::name, rand_generator::overload_name)
      .typed<rand_generator::schema>();
}

// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point for the generator overload.
at::Tensor rand_generator::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_generator_typed_handle();
    return op.call(size, generator, dtype, layout, device, pin_memory);
}

// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor rand_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_rand_generator_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory);
}

// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for the out-variant aten::rand.out.
static C10_NOINLINE c10::TypedOperatorHandle<rand_out::schema> create_rand_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_out::name, rand_out::overload_name)
      .typed<rand_out::schema>();
}

// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point: writes into `out` and returns it by reference.
at::Tensor & rand_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_rand_out_typed_handle();
    return op.call(size, out);
}

// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & rand_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_rand_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for aten::rand.generator_out.
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator_out::schema> create_rand_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_generator_out::name, rand_generator_out::overload_name)
      .typed<rand_generator_out::schema>();
}

// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point with an explicit generator.
at::Tensor & rand_generator_out::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_rand_generator_out_typed_handle();
    return op.call(size, generator, out);
}

// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & rand_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_rand_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, out);
}

// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// One-time schema lookup for aten::rand_like.
static C10_NOINLINE c10::TypedOperatorHandle<rand_like::schema> create_rand_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rand_like::name, rand_like::overload_name)
      .typed<rand_like::schema>();
}

// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Entry point: `self` supplies the reference tensor whose metadata the
// optional overrides (dtype/layout/device/pin_memory/memory_format) refine.
at::Tensor rand_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_rand_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor rand_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_rand_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::randint; kept out-of-line (C10_NOINLINE)
// so only the cheap cached-handle path below is inlined. The same pattern
// repeats for every randint overload in this section.
static C10_NOINLINE c10::TypedOperatorHandle<randint::schema> create_randint_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint::name, randint::overload_name)
      .typed<randint::schema>();
}

// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point: dispatches through the lazily-cached operator handle. `high`
// is a SymInt so symbolic shapes/bounds can flow through tracing.
at::Tensor randint::call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_typed_handle();
    return op.call(high, size, dtype, layout, device, pin_memory);
}

// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor randint::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_typed_handle();
    return op.redispatch(dispatchKeySet, high, size, dtype, layout, device, pin_memory);
}

// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::randint.generator.
static C10_NOINLINE c10::TypedOperatorHandle<randint_generator::schema> create_randint_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_generator::name, randint_generator::overload_name)
      .typed<randint_generator::schema>();
}

// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point for the generator overload.
at::Tensor randint_generator::call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_generator_typed_handle();
    return op.call(high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor randint_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_generator_typed_handle();
    return op.redispatch(dispatchKeySet, high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::randint.low.
static C10_NOINLINE c10::TypedOperatorHandle<randint_low::schema> create_randint_low_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_low::name, randint_low::overload_name)
      .typed<randint_low::schema>();
}

// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point for the explicit-lower-bound overload.
at::Tensor randint_low::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_low_typed_handle();
    return op.call(low, high, size, dtype, layout, device, pin_memory);
}

// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor randint_low::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_low_typed_handle();
    return op.redispatch(dispatchKeySet, low, high, size, dtype, layout, device, pin_memory);
}

// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time schema lookup for aten::randint.low_generator.
static C10_NOINLINE c10::TypedOperatorHandle<randint_low_generator::schema> create_randint_low_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_low_generator::name, randint_low_generator::overload_name)
      .typed<randint_low_generator::schema>();
}

// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Entry point for the lower-bound + generator overload.
at::Tensor randint_low_generator::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_low_generator_typed_handle();
    return op.call(low, high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor randint_low_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randint_low_generator_typed_handle();
    return op.redispatch(dispatchKeySet, low, high, size, generator, dtype, layout, device, pin_memory);
}

// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for the out-variant aten::randint.out; kept
// out-of-line (C10_NOINLINE) so only the cheap cached-handle path is inlined.
// The same pattern repeats for every randint out/like overload below.
static C10_NOINLINE c10::TypedOperatorHandle<randint_out::schema> create_randint_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_out::name, randint_out::overload_name)
      .typed<randint_out::schema>();
}

// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point: writes into `out` and returns it by reference.
at::Tensor & randint_out::call(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_randint_out_typed_handle();
    return op.call(high, size, out);
}

// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & randint_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_randint_out_typed_handle();
    return op.redispatch(dispatchKeySet, high, size, out);
}

// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for aten::randint.generator_out.
static C10_NOINLINE c10::TypedOperatorHandle<randint_generator_out::schema> create_randint_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_generator_out::name, randint_generator_out::overload_name)
      .typed<randint_generator_out::schema>();
}

// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point with an explicit generator.
at::Tensor & randint_generator_out::call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randint_generator_out_typed_handle();
    return op.call(high, size, generator, out);
}

// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & randint_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randint_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, high, size, generator, out);
}

// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for aten::randint.low_out.
static C10_NOINLINE c10::TypedOperatorHandle<randint_low_out::schema> create_randint_low_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_low_out::name, randint_low_out::overload_name)
      .typed<randint_low_out::schema>();
}

// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point with an explicit lower bound.
at::Tensor & randint_low_out::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_randint_low_out_typed_handle();
    return op.call(low, high, size, out);
}

// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & randint_low_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_randint_low_out_typed_handle();
    return op.redispatch(dispatchKeySet, low, high, size, out);
}

// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup for aten::randint.low_generator_out.
static C10_NOINLINE c10::TypedOperatorHandle<randint_low_generator_out::schema> create_randint_low_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_low_generator_out::name, randint_low_generator_out::overload_name)
      .typed<randint_low_generator_out::schema>();
}

// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Out-variant entry point with an explicit lower bound and generator.
at::Tensor & randint_low_generator_out::call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randint_low_generator_out_typed_handle();
    return op.call(low, high, size, generator, out);
}

// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor & randint_low_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randint_low_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, low, high, size, generator, out);
}

// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// One-time schema lookup for aten::randint_like.
static C10_NOINLINE c10::TypedOperatorHandle<randint_like::schema> create_randint_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_like::name, randint_like::overload_name)
      .typed<randint_like::schema>();
}

// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Entry point: `self` supplies the reference tensor whose metadata the
// optional overrides (dtype/layout/device/pin_memory/memory_format) refine.
at::Tensor randint_like::call(const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_randint_like_typed_handle();
    return op.call(self, high, dtype, layout, device, pin_memory, memory_format);
}

// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor randint_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_randint_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, high, dtype, layout, device, pin_memory, memory_format);
}

// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// One-time schema lookup for aten::randint_like.low_dtype.
static C10_NOINLINE c10::TypedOperatorHandle<randint_like_low_dtype::schema> create_randint_like_low_dtype_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randint_like_low_dtype::name, randint_like_low_dtype::overload_name)
      .typed<randint_like_low_dtype::schema>();
}

// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Entry point for the explicit-lower-bound like-variant.
at::Tensor randint_like_low_dtype::call(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_randint_like_low_dtype_typed_handle();
    return op.call(self, low, high, dtype, layout, device, pin_memory, memory_format);
}

// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor randint_like_low_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_randint_like_low_dtype_typed_handle();
    return op.redispatch(dispatchKeySet, self, low, high, dtype, layout, device, pin_memory, memory_format);
}

// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<randn::schema> create_randn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn::name, randn::overload_name)
      .typed<randn::schema>();
}

// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor randn::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randn_generator::schema> create_randn_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_generator::name, randn_generator::overload_name)
      .typed<randn_generator::schema>();
}

// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor randn_generator::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_generator_typed_handle();
    return op.call(size, generator, dtype, layout, device, pin_memory);
}

// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor randn_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_generator_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory);
}

// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randn_names::schema> create_randn_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_names::name, randn_names::overload_name)
      .typed<randn_names::schema>();
}

// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor randn_names::call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_names_typed_handle();
    return op.call(size, names, dtype, layout, device, pin_memory);
}

// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor randn_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randn_generator_with_names::schema> create_randn_generator_with_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_generator_with_names::name, randn_generator_with_names::overload_name)
      .typed<randn_generator_with_names::schema>();
}

// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor randn_generator_with_names::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_generator_with_names_typed_handle();
    return op.call(size, generator, names, dtype, layout, device, pin_memory);
}

// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor randn_generator_with_names::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randn_generator_with_names_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory);
}

// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randn_out::schema> create_randn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_out::name, randn_out::overload_name)
      .typed<randn_out::schema>();
}

// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & randn_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_randn_out_typed_handle();
    return op.call(size, out);
}

// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & randn_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
    
    static auto op = create_randn_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randn_generator_out::schema> create_randn_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_generator_out::name, randn_generator_out::overload_name)
      .typed<randn_generator_out::schema>();
}

// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & randn_generator_out::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randn_generator_out_typed_handle();
    return op.call(size, generator, out);
}

// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & randn_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randn_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, generator, out);
}

// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randn_like::schema> create_randn_like_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randn_like::name, randn_like::overload_name)
      .typed<randn_like::schema>();
}

// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor randn_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_randn_like_typed_handle();
    return op.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor randn_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_randn_like_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randperm::schema> create_randperm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm::name, randperm::overload_name)
      .typed<randperm::schema>();
}

// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor randperm::call(c10::SymInt n, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randperm_typed_handle();
    return op.call(n, dtype, layout, device, pin_memory);
}

// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor randperm::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randperm_typed_handle();
    return op.redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory);
}

// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randperm_generator::schema> create_randperm_generator_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm_generator::name, randperm_generator::overload_name)
      .typed<randperm_generator::schema>();
}

// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor randperm_generator::call(c10::SymInt n, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randperm_generator_typed_handle();
    return op.call(n, generator, dtype, layout, device, pin_memory);
}

// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor randperm_generator::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_randperm_generator_typed_handle();
    return op.redispatch(dispatchKeySet, n, generator, dtype, layout, device, pin_memory);
}

// aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randperm_out::schema> create_randperm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm_out::name, randperm_out::overload_name)
      .typed<randperm_out::schema>();
}

// aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & randperm_out::call(c10::SymInt n, at::Tensor & out) {
    
    static auto op = create_randperm_out_typed_handle();
    return op.call(n, out);
}

// aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & randperm_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) {
    
    static auto op = create_randperm_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, out);
}

// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<randperm_generator_out::schema> create_randperm_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(randperm_generator_out::name, randperm_generator_out::overload_name)
      .typed<randperm_generator_out::schema>();
}

// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & randperm_generator_out::call(c10::SymInt n, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randperm_generator_out_typed_handle();
    return op.call(n, generator, out);
}

// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & randperm_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_randperm_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, generator, out);
}

// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<range_step::schema> create_range_step_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range_step::name, range_step::overload_name)
      .typed<range_step::schema>();
}

// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor range_step::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_range_step_typed_handle();
    return op.call(start, end, step, dtype, layout, device, pin_memory);
}

// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor range_step::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_range_step_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory);
}

// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<range::schema> create_range_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range::name, range::overload_name)
      .typed<range::schema>();
}

// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor range::call(const at::Scalar & start, const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_range_typed_handle();
    return op.call(start, end, dtype, layout, device, pin_memory);
}

// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor range::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_range_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory);
}

// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<range_out_::schema> create_range_out__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range_out_::name, range_out_::overload_name)
      .typed<range_out_::schema>();
}

// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & range_out_::call(const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
    
    static auto op = create_range_out__typed_handle();
    return op.call(start, end, out);
}

// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & range_out_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
    
    static auto op = create_range_out__typed_handle();
    return op.redispatch(dispatchKeySet, start, end, out);
}

// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<range_out::schema> create_range_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(range_out::name, range_out::overload_name)
      .typed<range_out::schema>();
}

// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & range_out::call(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
    
    static auto op = create_range_out_typed_handle();
    return op.call(start, end, step, out);
}

// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & range_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
    
    static auto op = create_range_out_typed_handle();
    return op.redispatch(dispatchKeySet, start, end, step, out);
}

// aten::ravel(Tensor(a) self) -> Tensor(a)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<ravel::schema> create_ravel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ravel::name, ravel::overload_name)
      .typed<ravel::schema>();
}

// aten::ravel(Tensor(a) self) -> Tensor(a)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor ravel::call(const at::Tensor & self) {
    
    static auto op = create_ravel_typed_handle();
    return op.call(self);
}

// aten::ravel(Tensor(a) self) -> Tensor(a)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor ravel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_ravel_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::reciprocal(Tensor self) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<reciprocal::schema> create_reciprocal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reciprocal::name, reciprocal::overload_name)
      .typed<reciprocal::schema>();
}

// aten::reciprocal(Tensor self) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor reciprocal::call(const at::Tensor & self) {
    
    static auto op = create_reciprocal_typed_handle();
    return op.call(self);
}

// aten::reciprocal(Tensor self) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor reciprocal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_reciprocal_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<reciprocal_::schema> create_reciprocal__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reciprocal_::name, reciprocal_::overload_name)
      .typed<reciprocal_::schema>();
}

// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & reciprocal_::call(at::Tensor & self) {
    
    static auto op = create_reciprocal__typed_handle();
    return op.call(self);
}

// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & reciprocal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_reciprocal__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<reciprocal_out::schema> create_reciprocal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reciprocal_out::name, reciprocal_out::overload_name)
      .typed<reciprocal_out::schema>();
}

// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & reciprocal_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_reciprocal_out_typed_handle();
    return op.call(self, out);
}

// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & reciprocal_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_reciprocal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::neg(Tensor self) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<neg::schema> create_neg_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(neg::name, neg::overload_name)
      .typed<neg::schema>();
}

// aten::neg(Tensor self) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor neg::call(const at::Tensor & self) {
    
    static auto op = create_neg_typed_handle();
    return op.call(self);
}

// aten::neg(Tensor self) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor neg::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_neg_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::neg_(Tensor(a!) self) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<neg_::schema> create_neg__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(neg_::name, neg_::overload_name)
      .typed<neg_::schema>();
}

// aten::neg_(Tensor(a!) self) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & neg_::call(at::Tensor & self) {
    
    static auto op = create_neg__typed_handle();
    return op.call(self);
}

// aten::neg_(Tensor(a!) self) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & neg_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_neg__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<neg_out::schema> create_neg_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(neg_out::name, neg_out::overload_name)
      .typed<neg_out::schema>();
}

// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & neg_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_neg_out_typed_handle();
    return op.call(self, out);
}

// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & neg_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_neg_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::negative(Tensor self) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<negative::schema> create_negative_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(negative::name, negative::overload_name)
      .typed<negative::schema>();
}

// aten::negative(Tensor self) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor negative::call(const at::Tensor & self) {
    
    static auto op = create_negative_typed_handle();
    return op.call(self);
}

// aten::negative(Tensor self) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor negative::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_negative_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::negative_(Tensor(a!) self) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<negative_::schema> create_negative__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(negative_::name, negative_::overload_name)
      .typed<negative_::schema>();
}

// aten::negative_(Tensor(a!) self) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & negative_::call(at::Tensor & self) {
    
    static auto op = create_negative__typed_handle();
    return op.call(self);
}

// aten::negative_(Tensor(a!) self) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & negative_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_negative__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<negative_out::schema> create_negative_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(negative_out::name, negative_out::overload_name)
      .typed<negative_out::schema>();
}

// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor & negative_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_negative_out_typed_handle();
    return op.call(self, out);
}

// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & negative_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_negative_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<repeat::schema> create_repeat_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(repeat::name, repeat::overload_name)
      .typed<repeat::schema>();
}

// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor repeat::call(const at::Tensor & self, c10::SymIntArrayRef repeats) {
    
    static auto op = create_repeat_typed_handle();
    return op.call(self, repeats);
}

// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor repeat::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats) {
    
    static auto op = create_repeat_typed_handle();
    return op.redispatch(dispatchKeySet, self, repeats);
}

// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_Tensor::schema> create_repeat_interleave_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(repeat_interleave_Tensor::name, repeat_interleave_Tensor::overload_name)
      .typed<repeat_interleave_Tensor::schema>();
}

// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor repeat_interleave_Tensor::call(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
    
    static auto op = create_repeat_interleave_Tensor_typed_handle();
    return op.call(repeats, output_size);
}

// aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor repeat_interleave_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size) {
    
    static auto op = create_repeat_interleave_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, repeats, output_size);
}

// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_self_Tensor::schema> create_repeat_interleave_self_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(repeat_interleave_self_Tensor::name, repeat_interleave_self_Tensor::overload_name)
      .typed<repeat_interleave_self_Tensor::schema>();
}

// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor repeat_interleave_self_Tensor::call(const at::Tensor & self, const at::Tensor & repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    
    static auto op = create_repeat_interleave_self_Tensor_typed_handle();
    return op.call(self, repeats, dim, output_size);
}

// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor repeat_interleave_self_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    
    static auto op = create_repeat_interleave_self_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, repeats, dim, output_size);
}

// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_self_int::schema> create_repeat_interleave_self_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(repeat_interleave_self_int::name, repeat_interleave_self_int::overload_name)
      .typed<repeat_interleave_self_int::schema>();
}

// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor repeat_interleave_self_int::call(const at::Tensor & self, c10::SymInt repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    
    static auto op = create_repeat_interleave_self_int_typed_handle();
    return op.call(self, repeats, dim, output_size);
}

// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor repeat_interleave_self_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt repeats, ::std::optional<int64_t> dim, ::std::optional<c10::SymInt> output_size) {
    
    static auto op = create_repeat_interleave_self_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, repeats, dim, output_size);
}

// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<reshape::schema> create_reshape_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reshape::name, reshape::overload_name)
      .typed<reshape::schema>();
}

// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor reshape::call(const at::Tensor & self, c10::SymIntArrayRef shape) {
    
    static auto op = create_reshape_typed_handle();
    return op.call(self, shape);
}

// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor reshape::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shape) {
    
    static auto op = create_reshape_typed_handle();
    return op.redispatch(dispatchKeySet, self, shape);
}

// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_copy::schema> create__reshape_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_copy::name, _reshape_copy::overload_name)
      .typed<_reshape_copy::schema>();
}

// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor _reshape_copy::call(const at::Tensor & self, c10::SymIntArrayRef size) {
    
    static auto op = create__reshape_copy_typed_handle();
    return op.call(self, size);
}

// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor _reshape_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
    
    static auto op = create__reshape_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
// Resolves this overload's schema from the dispatcher singleton and binds its
// typed C++ signature (cold lookup path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_alias::schema> create__reshape_alias_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_alias::name, _reshape_alias::overload_name)
      .typed<_reshape_alias::schema>();
}

// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
// Dispatch entry point; typed handle cached in a thread-safe local static.
at::Tensor _reshape_alias::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    
    static auto op = create__reshape_alias_typed_handle();
    return op.call(self, size, stride);
}

// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
// Same as call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor _reshape_alias::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    
    static auto op = create__reshape_alias_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride);
}

// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
// Looks up _mkldnn_reshape in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_reshape::schema> create__mkldnn_reshape_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mkldnn_reshape::name, _mkldnn_reshape::overload_name)
      .typed<_mkldnn_reshape::schema>();
}

// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor _mkldnn_reshape::call(const at::Tensor & self, at::IntArrayRef shape) {
    
    static auto op = create__mkldnn_reshape_typed_handle();
    return op.call(self, shape);
}

// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor _mkldnn_reshape::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape) {
    
    static auto op = create__mkldnn_reshape_typed_handle();
    return op.redispatch(dispatchKeySet, self, shape);
}

// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
// Looks up reshape_as in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<reshape_as::schema> create_reshape_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reshape_as::name, reshape_as::overload_name)
      .typed<reshape_as::schema>();
}

// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor reshape_as::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_reshape_as_typed_handle();
    return op.call(self, other);
}

// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor reshape_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_reshape_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::round(Tensor self) -> Tensor
// Looks up round in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<round::schema> create_round_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round::name, round::overload_name)
      .typed<round::schema>();
}

// aten::round(Tensor self) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor round::call(const at::Tensor & self) {
    
    static auto op = create_round_typed_handle();
    return op.call(self);
}

// aten::round(Tensor self) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor round::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_round_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::round_(Tensor(a!) self) -> Tensor(a!)
// Looks up round_ (in-place variant) in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<round_::schema> create_round__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_::name, round_::overload_name)
      .typed<round_::schema>();
}

// aten::round_(Tensor(a!) self) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & round_::call(at::Tensor & self) {
    
    static auto op = create_round__typed_handle();
    return op.call(self);
}

// aten::round_(Tensor(a!) self) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & round_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_round__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up round.out (out= variant) in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<round_out::schema> create_round_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_out::name, round_out::overload_name)
      .typed<round_out::schema>();
}

// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & round_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_round_out_typed_handle();
    return op.call(self, out);
}

// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & round_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_round_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
// Looks up the round.decimals overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<round_decimals::schema> create_round_decimals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_decimals::name, round_decimals::overload_name)
      .typed<round_decimals::schema>();
}

// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor round_decimals::call(const at::Tensor & self, int64_t decimals) {
    
    static auto op = create_round_decimals_typed_handle();
    return op.call(self, decimals);
}

// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor round_decimals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals) {
    
    static auto op = create_round_decimals_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals);
}

// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
// Looks up the in-place round_.decimals overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<round__decimals::schema> create_round__decimals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round__decimals::name, round__decimals::overload_name)
      .typed<round__decimals::schema>();
}

// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & round__decimals::call(at::Tensor & self, int64_t decimals) {
    
    static auto op = create_round__decimals_typed_handle();
    return op.call(self, decimals);
}

// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & round__decimals::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals) {
    
    static auto op = create_round__decimals_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals);
}

// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
// Looks up the round.decimals_out overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<round_decimals_out::schema> create_round_decimals_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(round_decimals_out::name, round_decimals_out::overload_name)
      .typed<round_decimals_out::schema>();
}

// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & round_decimals_out::call(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    
    static auto op = create_round_decimals_out_typed_handle();
    return op.call(self, decimals, out);
}

// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & round_decimals_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    
    static auto op = create_round_decimals_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, decimals, out);
}

// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
// Looks up rrelu in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu::schema> create_rrelu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu::name, rrelu::overload_name)
      .typed<rrelu::schema>();
}

// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor rrelu::call(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu_typed_handle();
    return op.call(self, lower, upper, training, generator);
}

// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor rrelu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu_typed_handle();
    return op.redispatch(dispatchKeySet, self, lower, upper, training, generator);
}

// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
// Looks up the in-place rrelu_ variant in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_::schema> create_rrelu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_::name, rrelu_::overload_name)
      .typed<rrelu_::schema>();
}

// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & rrelu_::call(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu__typed_handle();
    return op.call(self, lower, upper, training, generator);
}

// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & rrelu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu__typed_handle();
    return op.redispatch(dispatchKeySet, self, lower, upper, training, generator);
}

// aten::relu(Tensor self) -> Tensor
// Looks up relu in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<relu::schema> create_relu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu::name, relu::overload_name)
      .typed<relu::schema>();
}

// aten::relu(Tensor self) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor relu::call(const at::Tensor & self) {
    
    static auto op = create_relu_typed_handle();
    return op.call(self);
}

// aten::relu(Tensor self) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::relu_(Tensor(a!) self) -> Tensor(a!)
// Looks up the in-place relu_ variant in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<relu_::schema> create_relu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu_::name, relu_::overload_name)
      .typed<relu_::schema>();
}

// aten::relu_(Tensor(a!) self) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & relu_::call(at::Tensor & self) {
    
    static auto op = create_relu__typed_handle();
    return op.call(self);
}

// aten::relu_(Tensor(a!) self) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & relu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_relu__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::relu6(Tensor self) -> Tensor
// Looks up relu6 in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<relu6::schema> create_relu6_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu6::name, relu6::overload_name)
      .typed<relu6::schema>();
}

// aten::relu6(Tensor self) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor relu6::call(const at::Tensor & self) {
    
    static auto op = create_relu6_typed_handle();
    return op.call(self);
}

// aten::relu6(Tensor self) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor relu6::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_relu6_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
// Looks up the in-place relu6_ variant in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<relu6_::schema> create_relu6__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(relu6_::name, relu6_::overload_name)
      .typed<relu6_::schema>();
}

// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & relu6_::call(at::Tensor & self) {
    
    static auto op = create_relu6__typed_handle();
    return op.call(self);
}

// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & relu6_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_relu6__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::prelu(Tensor self, Tensor weight) -> Tensor
// Looks up prelu in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<prelu::schema> create_prelu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prelu::name, prelu::overload_name)
      .typed<prelu::schema>();
}

// aten::prelu(Tensor self, Tensor weight) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor prelu::call(const at::Tensor & self, const at::Tensor & weight) {
    
    static auto op = create_prelu_typed_handle();
    return op.call(self, weight);
}

// aten::prelu(Tensor self, Tensor weight) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor prelu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) {
    
    static auto op = create_prelu_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight);
}

// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
// Looks up _prelu_kernel in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_prelu_kernel::schema> create__prelu_kernel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_prelu_kernel::name, _prelu_kernel::overload_name)
      .typed<_prelu_kernel::schema>();
}

// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor _prelu_kernel::call(const at::Tensor & self, const at::Tensor & weight) {
    
    static auto op = create__prelu_kernel_typed_handle();
    return op.call(self, weight);
}

// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor _prelu_kernel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) {
    
    static auto op = create__prelu_kernel_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight);
}

// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
// Looks up _prelu_kernel_backward in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_prelu_kernel_backward::schema> create__prelu_kernel_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_prelu_kernel_backward::name, _prelu_kernel_backward::overload_name)
      .typed<_prelu_kernel_backward::schema>();
}

// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
// Public entry point: full dispatch via a cached (function-local static) handle.
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
    
    static auto op = create__prelu_kernel_backward_typed_handle();
    return op.call(grad_output, self, weight);
}

// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
    
    static auto op = create__prelu_kernel_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, weight);
}

// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
// Looks up the gelu.out overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<gelu_out::schema> create_gelu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_out::name, gelu_out::overload_name)
      .typed<gelu_out::schema>();
}

// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & gelu_out::call(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
    
    static auto op = create_gelu_out_typed_handle();
    return op.call(self, approximate, out);
}

// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & gelu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
    
    static auto op = create_gelu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, approximate, out);
}

// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
// Looks up the in-place gelu_ variant in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<gelu_::schema> create_gelu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_::name, gelu_::overload_name)
      .typed<gelu_::schema>();
}

// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & gelu_::call(at::Tensor & self, c10::string_view approximate) {
    
    static auto op = create_gelu__typed_handle();
    return op.call(self, approximate);
}

// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & gelu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate) {
    
    static auto op = create_gelu__typed_handle();
    return op.redispatch(dispatchKeySet, self, approximate);
}

// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
// Looks up gelu in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<gelu::schema> create_gelu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu::name, gelu::overload_name)
      .typed<gelu::schema>();
}

// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor gelu::call(const at::Tensor & self, c10::string_view approximate) {
    
    static auto op = create_gelu_typed_handle();
    return op.call(self, approximate);
}

// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor gelu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate) {
    
    static auto op = create_gelu_typed_handle();
    return op.redispatch(dispatchKeySet, self, approximate);
}

// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
// Looks up the gelu_backward.grad_input overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<gelu_backward_grad_input::schema> create_gelu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_backward_grad_input::name, gelu_backward_grad_input::overload_name)
      .typed<gelu_backward_grad_input::schema>();
}

// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & gelu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
    
    static auto op = create_gelu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, approximate, grad_input);
}

// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & gelu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
    
    static auto op = create_gelu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, approximate, grad_input);
}

// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
// Looks up gelu_backward in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<gelu_backward::schema> create_gelu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gelu_backward::name, gelu_backward::overload_name)
      .typed<gelu_backward::schema>();
}

// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor gelu_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
    
    static auto op = create_gelu_backward_typed_handle();
    return op.call(grad_output, self, approximate);
}

// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor gelu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
    
    static auto op = create_gelu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, approximate);
}

// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
// Looks up infinitely_differentiable_gelu_backward in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<infinitely_differentiable_gelu_backward::schema> create_infinitely_differentiable_gelu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(infinitely_differentiable_gelu_backward::name, infinitely_differentiable_gelu_backward::overload_name)
      .typed<infinitely_differentiable_gelu_backward::schema>();
}

// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor infinitely_differentiable_gelu_backward::call(const at::Tensor & grad, const at::Tensor & self) {
    
    static auto op = create_infinitely_differentiable_gelu_backward_typed_handle();
    return op.call(grad, self);
}

// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor infinitely_differentiable_gelu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self) {
    
    static auto op = create_infinitely_differentiable_gelu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self);
}

// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the hardshrink.out overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink_out::schema> create_hardshrink_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink_out::name, hardshrink_out::overload_name)
      .typed<hardshrink_out::schema>();
}

// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & hardshrink_out::call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
    
    static auto op = create_hardshrink_out_typed_handle();
    return op.call(self, lambd, out);
}

// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & hardshrink_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
    
    static auto op = create_hardshrink_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, out);
}

// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
// Looks up hardshrink in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink::schema> create_hardshrink_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink::name, hardshrink::overload_name)
      .typed<hardshrink::schema>();
}

// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor hardshrink::call(const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_hardshrink_typed_handle();
    return op.call(self, lambd);
}

// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor hardshrink::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_hardshrink_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd);
}

// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up the hardshrink_backward.grad_input overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink_backward_grad_input::schema> create_hardshrink_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink_backward_grad_input::name, hardshrink_backward_grad_input::overload_name)
      .typed<hardshrink_backward_grad_input::schema>();
}

// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & hardshrink_backward_grad_input::call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
    
    static auto op = create_hardshrink_backward_grad_input_typed_handle();
    return op.call(grad_out, self, lambd, grad_input);
}

// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & hardshrink_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
    
    static auto op = create_hardshrink_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, self, lambd, grad_input);
}

// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
// Looks up hardshrink_backward in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hardshrink_backward::schema> create_hardshrink_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardshrink_backward::name, hardshrink_backward::overload_name)
      .typed<hardshrink_backward::schema>();
}

// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor hardshrink_backward::call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_hardshrink_backward_typed_handle();
    return op.call(grad_out, self, lambd);
}

// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor hardshrink_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_hardshrink_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, self, lambd);
}

// aten::rsqrt(Tensor self) -> Tensor
// Looks up rsqrt in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rsqrt::schema> create_rsqrt_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsqrt::name, rsqrt::overload_name)
      .typed<rsqrt::schema>();
}

// aten::rsqrt(Tensor self) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor rsqrt::call(const at::Tensor & self) {
    
    static auto op = create_rsqrt_typed_handle();
    return op.call(self);
}

// aten::rsqrt(Tensor self) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor rsqrt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_rsqrt_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
// Looks up the in-place rsqrt_ variant in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rsqrt_::schema> create_rsqrt__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsqrt_::name, rsqrt_::overload_name)
      .typed<rsqrt_::schema>();
}

// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & rsqrt_::call(at::Tensor & self) {
    
    static auto op = create_rsqrt__typed_handle();
    return op.call(self);
}

// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & rsqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_rsqrt__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the rsqrt.out overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rsqrt_out::schema> create_rsqrt_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsqrt_out::name, rsqrt_out::overload_name)
      .typed<rsqrt_out::schema>();
}

// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor & rsqrt_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_rsqrt_out_typed_handle();
    return op.call(self, out);
}

// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor & rsqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_rsqrt_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
// Looks up the select.Dimname overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<select_Dimname::schema> create_select_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_Dimname::name, select_Dimname::overload_name)
      .typed<select_Dimname::schema>();
}

// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor select_Dimname::call(const at::Tensor & self, at::Dimname dim, int64_t index) {
    
    static auto op = create_select_Dimname_typed_handle();
    return op.call(self, dim, index);
}

// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor select_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, int64_t index) {
    
    static auto op = create_select_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
// Looks up the select.int overload in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<select_int::schema> create_select_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_int::name, select_int::overload_name)
      .typed<select_int::schema>();
}

// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor select_int::call(const at::Tensor & self, int64_t dim, c10::SymInt index) {
    
    static auto op = create_select_int_typed_handle();
    return op.call(self, dim, index);
}

// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor select_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    
    static auto op = create_select_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
// Looks up select_backward in the Dispatcher; throws if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<select_backward::schema> create_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_backward::name, select_backward::overload_name)
      .typed<select_backward::schema>();
}

// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor select_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
    
    static auto op = create_select_backward_typed_handle();
    return op.call(grad_output, input_sizes, dim, index);
}

// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
    
    static auto op = create_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, index);
}

// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
// Looks up _nested_select_backward in the Dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_select_backward::schema> create__nested_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_select_backward::name, _nested_select_backward::overload_name)
      .typed<_nested_select_backward::schema>();
}

// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
// Public entry point: full dispatch via a cached (function-local static) handle.
at::Tensor _nested_select_backward::call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    
    static auto op = create__nested_select_backward_typed_handle();
    return op.call(grad_output, self, dim, index);
}

// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
// Re-dispatch entry point: forwards the caller-supplied DispatchKeySet.
at::Tensor _nested_select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    
    static auto op = create__nested_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, dim, index);
}

// [torchgen boilerplate] Dispatcher stubs for aten::selu, aten::selu_,
// aten::celu, aten::celu_. For each operator:
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::selu(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<selu::schema> create_selu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(selu::name, selu::overload_name)
      .typed<selu::schema>();
}

// aten::selu(Tensor self) -> Tensor
at::Tensor selu::call(const at::Tensor & self) {
    
    static auto op = create_selu_typed_handle();
    return op.call(self);
}

// aten::selu(Tensor self) -> Tensor
at::Tensor selu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_selu_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::selu_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<selu_::schema> create_selu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(selu_::name, selu_::overload_name)
      .typed<selu_::schema>();
}

// aten::selu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & selu_::call(at::Tensor & self) {
    
    static auto op = create_selu__typed_handle();
    return op.call(self);
}

// aten::selu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & selu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_selu__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<celu::schema> create_celu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(celu::name, celu::overload_name)
      .typed<celu::schema>();
}

// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
at::Tensor celu::call(const at::Tensor & self, const at::Scalar & alpha) {
    
    static auto op = create_celu_typed_handle();
    return op.call(self, alpha);
}

// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
at::Tensor celu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha) {
    
    static auto op = create_celu_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha);
}

// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<celu_::schema> create_celu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(celu_::name, celu_::overload_name)
      .typed<celu_::schema>();
}

// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
at::Tensor & celu_::call(at::Tensor & self, const at::Scalar & alpha) {
    
    static auto op = create_celu__typed_handle();
    return op.call(self, alpha);
}

// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
at::Tensor & celu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha) {
    
    static auto op = create_celu__typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha);
}

// [torchgen boilerplate] Dispatcher stubs for the aten::silu family
// (silu, silu_, silu.out, silu_backward.grad_input, silu_backward).
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::silu(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<silu::schema> create_silu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu::name, silu::overload_name)
      .typed<silu::schema>();
}

// aten::silu(Tensor self) -> Tensor
at::Tensor silu::call(const at::Tensor & self) {
    
    static auto op = create_silu_typed_handle();
    return op.call(self);
}

// aten::silu(Tensor self) -> Tensor
at::Tensor silu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_silu_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::silu_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<silu_::schema> create_silu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_::name, silu_::overload_name)
      .typed<silu_::schema>();
}

// aten::silu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & silu_::call(at::Tensor & self) {
    
    static auto op = create_silu__typed_handle();
    return op.call(self);
}

// aten::silu_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & silu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_silu__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<silu_out::schema> create_silu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_out::name, silu_out::overload_name)
      .typed<silu_out::schema>();
}

// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & silu_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_silu_out_typed_handle();
    return op.call(self, out);
}

// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & silu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_silu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<silu_backward_grad_input::schema> create_silu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_backward_grad_input::name, silu_backward_grad_input::overload_name)
      .typed<silu_backward_grad_input::schema>();
}

// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & silu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    
    static auto op = create_silu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, grad_input);
}

// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & silu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    
    static auto op = create_silu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, grad_input);
}

// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<silu_backward::schema> create_silu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(silu_backward::name, silu_backward::overload_name)
      .typed<silu_backward::schema>();
}

// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor silu_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_silu_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor silu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_silu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// [torchgen boilerplate] Dispatcher stubs for the aten::mish family
// (mish, mish_, mish.out, mish_backward).
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::mish(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mish::schema> create_mish_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish::name, mish::overload_name)
      .typed<mish::schema>();
}

// aten::mish(Tensor self) -> Tensor
at::Tensor mish::call(const at::Tensor & self) {
    
    static auto op = create_mish_typed_handle();
    return op.call(self);
}

// aten::mish(Tensor self) -> Tensor
at::Tensor mish::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_mish_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::mish_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mish_::schema> create_mish__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish_::name, mish_::overload_name)
      .typed<mish_::schema>();
}

// aten::mish_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & mish_::call(at::Tensor & self) {
    
    static auto op = create_mish__typed_handle();
    return op.call(self);
}

// aten::mish_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & mish_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_mish__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mish_out::schema> create_mish_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish_out::name, mish_out::overload_name)
      .typed<mish_out::schema>();
}

// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mish_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_mish_out_typed_handle();
    return op.call(self, out);
}

// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mish_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_mish_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mish_backward::schema> create_mish_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mish_backward::name, mish_backward::overload_name)
      .typed<mish_backward::schema>();
}

// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mish_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_mish_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mish_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_mish_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// [torchgen boilerplate] Dispatcher stubs for aten::sigmoid, aten::sigmoid_,
// aten::sigmoid.out.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::sigmoid(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid::schema> create_sigmoid_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid::name, sigmoid::overload_name)
      .typed<sigmoid::schema>();
}

// aten::sigmoid(Tensor self) -> Tensor
at::Tensor sigmoid::call(const at::Tensor & self) {
    
    static auto op = create_sigmoid_typed_handle();
    return op.call(self);
}

// aten::sigmoid(Tensor self) -> Tensor
at::Tensor sigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sigmoid_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_::schema> create_sigmoid__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_::name, sigmoid_::overload_name)
      .typed<sigmoid_::schema>();
}

// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sigmoid_::call(at::Tensor & self) {
    
    static auto op = create_sigmoid__typed_handle();
    return op.call(self);
}

// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sigmoid_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_sigmoid__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_out::schema> create_sigmoid_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_out::name, sigmoid_out::overload_name)
      .typed<sigmoid_out::schema>();
}

// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sigmoid_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sigmoid_out_typed_handle();
    return op.call(self, out);
}

// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sigmoid_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// [torchgen boilerplate] Dispatcher stubs for aten::logit, aten::logit_,
// aten::logit.out. The optional `eps` mirrors the schema's `float?` argument.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::logit(Tensor self, float? eps=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<logit::schema> create_logit_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit::name, logit::overload_name)
      .typed<logit::schema>();
}

// aten::logit(Tensor self, float? eps=None) -> Tensor
at::Tensor logit::call(const at::Tensor & self, ::std::optional<double> eps) {
    
    static auto op = create_logit_typed_handle();
    return op.call(self, eps);
}

// aten::logit(Tensor self, float? eps=None) -> Tensor
at::Tensor logit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> eps) {
    
    static auto op = create_logit_typed_handle();
    return op.redispatch(dispatchKeySet, self, eps);
}

// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logit_::schema> create_logit__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit_::name, logit_::overload_name)
      .typed<logit_::schema>();
}

// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
at::Tensor & logit_::call(at::Tensor & self, ::std::optional<double> eps) {
    
    static auto op = create_logit__typed_handle();
    return op.call(self, eps);
}

// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
at::Tensor & logit_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<double> eps) {
    
    static auto op = create_logit__typed_handle();
    return op.redispatch(dispatchKeySet, self, eps);
}

// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<logit_out::schema> create_logit_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit_out::name, logit_out::overload_name)
      .typed<logit_out::schema>();
}

// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logit_out::call(const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
    
    static auto op = create_logit_out_typed_handle();
    return op.call(self, eps, out);
}

// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & logit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
    
    static auto op = create_logit_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, eps, out);
}

// [torchgen boilerplate] Dispatcher stubs for aten::sin, aten::sin_,
// aten::sin.out.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::sin(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sin::schema> create_sin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sin::name, sin::overload_name)
      .typed<sin::schema>();
}

// aten::sin(Tensor self) -> Tensor
at::Tensor sin::call(const at::Tensor & self) {
    
    static auto op = create_sin_typed_handle();
    return op.call(self);
}

// aten::sin(Tensor self) -> Tensor
at::Tensor sin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sin_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sin_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sin_::schema> create_sin__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sin_::name, sin_::overload_name)
      .typed<sin_::schema>();
}

// aten::sin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sin_::call(at::Tensor & self) {
    
    static auto op = create_sin__typed_handle();
    return op.call(self);
}

// aten::sin_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_sin__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sin_out::schema> create_sin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sin_out::name, sin_out::overload_name)
      .typed<sin_out::schema>();
}

// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sin_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sin_out_typed_handle();
    return op.call(self, out);
}

// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// [torchgen boilerplate] Dispatcher stubs for aten::sinc, aten::sinc_,
// aten::sinc.out.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::sinc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sinc::schema> create_sinc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinc::name, sinc::overload_name)
      .typed<sinc::schema>();
}

// aten::sinc(Tensor self) -> Tensor
at::Tensor sinc::call(const at::Tensor & self) {
    
    static auto op = create_sinc_typed_handle();
    return op.call(self);
}

// aten::sinc(Tensor self) -> Tensor
at::Tensor sinc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sinc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinc_::schema> create_sinc__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinc_::name, sinc_::overload_name)
      .typed<sinc_::schema>();
}

// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinc_::call(at::Tensor & self) {
    
    static auto op = create_sinc__typed_handle();
    return op.call(self);
}

// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_sinc__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinc_out::schema> create_sinc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinc_out::name, sinc_out::overload_name)
      .typed<sinc_out::schema>();
}

// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinc_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sinc_out_typed_handle();
    return op.call(self, out);
}

// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sinc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// [torchgen boilerplate] Dispatcher stubs for aten::sinh, aten::sinh_,
// aten::sinh.out.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::sinh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sinh::schema> create_sinh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinh::name, sinh::overload_name)
      .typed<sinh::schema>();
}

// aten::sinh(Tensor self) -> Tensor
at::Tensor sinh::call(const at::Tensor & self) {
    
    static auto op = create_sinh_typed_handle();
    return op.call(self);
}

// aten::sinh(Tensor self) -> Tensor
at::Tensor sinh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sinh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinh_::schema> create_sinh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinh_::name, sinh_::overload_name)
      .typed<sinh_::schema>();
}

// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinh_::call(at::Tensor & self) {
    
    static auto op = create_sinh__typed_handle();
    return op.call(self);
}

// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_sinh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sinh_out::schema> create_sinh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sinh_out::name, sinh_out::overload_name)
      .typed<sinh_out::schema>();
}

// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinh_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sinh_out_typed_handle();
    return op.call(self, out);
}

// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_sinh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// [torchgen boilerplate] Dispatcher stubs for aten::detach (view-returning,
// per the Tensor(a) alias annotation) and aten::detach_ (in-place).
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::detach(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<detach::schema> create_detach_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(detach::name, detach::overload_name)
      .typed<detach::schema>();
}

// aten::detach(Tensor(a) self) -> Tensor(a)
at::Tensor detach::call(const at::Tensor & self) {
    
    static auto op = create_detach_typed_handle();
    return op.call(self);
}

// aten::detach(Tensor(a) self) -> Tensor(a)
at::Tensor detach::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_detach_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::detach_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<detach_::schema> create_detach__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(detach_::name, detach_::overload_name)
      .typed<detach_::schema>();
}

// aten::detach_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & detach_::call(at::Tensor & self) {
    
    static auto op = create_detach__typed_handle();
    return op.call(self);
}

// aten::detach_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & detach_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_detach__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// [torchgen boilerplate] Dispatcher stubs for aten::size.int and
// aten::size.Dimname — scalar-returning queries dispatched like any other op.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::size.int(Tensor self, int dim) -> int
static C10_NOINLINE c10::TypedOperatorHandle<size_int::schema> create_size_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(size_int::name, size_int::overload_name)
      .typed<size_int::schema>();
}

// aten::size.int(Tensor self, int dim) -> int
int64_t size_int::call(const at::Tensor & self, int64_t dim) {
    
    static auto op = create_size_int_typed_handle();
    return op.call(self, dim);
}

// aten::size.int(Tensor self, int dim) -> int
int64_t size_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    
    static auto op = create_size_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::size.Dimname(Tensor self, Dimname dim) -> int
static C10_NOINLINE c10::TypedOperatorHandle<size_Dimname::schema> create_size_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(size_Dimname::name, size_Dimname::overload_name)
      .typed<size_Dimname::schema>();
}

// aten::size.Dimname(Tensor self, Dimname dim) -> int
int64_t size_Dimname::call(const at::Tensor & self, at::Dimname dim) {
    
    static auto op = create_size_Dimname_typed_handle();
    return op.call(self, dim);
}

// aten::size.Dimname(Tensor self, Dimname dim) -> int
int64_t size_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    
    static auto op = create_size_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// [torchgen boilerplate] Dispatcher stubs for the symbolic-integer queries
// aten::sym_size.int, aten::sym_numel, aten::sym_storage_offset (return
// c10::SymInt rather than a concrete int64_t).
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::sym_size.int(Tensor self, int dim) -> SymInt
static C10_NOINLINE c10::TypedOperatorHandle<sym_size_int::schema> create_sym_size_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sym_size_int::name, sym_size_int::overload_name)
      .typed<sym_size_int::schema>();
}

// aten::sym_size.int(Tensor self, int dim) -> SymInt
c10::SymInt sym_size_int::call(const at::Tensor & self, int64_t dim) {
    
    static auto op = create_sym_size_int_typed_handle();
    return op.call(self, dim);
}

// aten::sym_size.int(Tensor self, int dim) -> SymInt
c10::SymInt sym_size_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    
    static auto op = create_sym_size_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::sym_numel(Tensor self) -> SymInt
static C10_NOINLINE c10::TypedOperatorHandle<sym_numel::schema> create_sym_numel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sym_numel::name, sym_numel::overload_name)
      .typed<sym_numel::schema>();
}

// aten::sym_numel(Tensor self) -> SymInt
c10::SymInt sym_numel::call(const at::Tensor & self) {
    
    static auto op = create_sym_numel_typed_handle();
    return op.call(self);
}

// aten::sym_numel(Tensor self) -> SymInt
c10::SymInt sym_numel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sym_numel_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sym_storage_offset(Tensor self) -> SymInt
static C10_NOINLINE c10::TypedOperatorHandle<sym_storage_offset::schema> create_sym_storage_offset_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sym_storage_offset::name, sym_storage_offset::overload_name)
      .typed<sym_storage_offset::schema>();
}

// aten::sym_storage_offset(Tensor self) -> SymInt
c10::SymInt sym_storage_offset::call(const at::Tensor & self) {
    
    static auto op = create_sym_storage_offset_typed_handle();
    return op.call(self);
}

// aten::sym_storage_offset(Tensor self) -> SymInt
c10::SymInt sym_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sym_storage_offset_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// [torchgen boilerplate] Dispatcher stubs for the slice family:
// aten::slice.Tensor, aten::slice_backward, aten::slice_inverse,
// aten::slice_scatter. Optional SymInt start/end mirror the schema's
// `SymInt?` arguments.
//  * create_*_typed_handle(): resolves the operator schema from the global
//    c10::Dispatcher (findSchemaOrThrow throws if the op is unregistered);
//    C10_NOINLINE keeps this cold lookup path out of the inlined call sites.
//  * call(): public entry point; caches the handle in a function-local static
//    (thread-safe one-time init) and starts dispatch from scratch.
//  * redispatch(): continues dispatch under an explicitly provided
//    DispatchKeySet (used by wrapper kernels, e.g. autograd).
// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<slice_Tensor::schema> create_slice_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_Tensor::name, slice_Tensor::overload_name)
      .typed<slice_Tensor::schema>();
}

// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
at::Tensor slice_Tensor::call(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    
    static auto op = create_slice_Tensor_typed_handle();
    return op.call(self, dim, start, end, step);
}

// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
at::Tensor slice_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    
    static auto op = create_slice_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, start, end, step);
}

// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slice_backward::schema> create_slice_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_backward::name, slice_backward::overload_name)
      .typed<slice_backward::schema>();
}

// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
at::Tensor slice_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
    
    static auto op = create_slice_backward_typed_handle();
    return op.call(grad_output, input_sizes, dim, start, end, step);
}

// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
at::Tensor slice_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
    
    static auto op = create_slice_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step);
}

// aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<slice_inverse::schema> create_slice_inverse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_inverse::name, slice_inverse::overload_name)
      .typed<slice_inverse::schema>();
}

// aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
at::Tensor slice_inverse::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    
    static auto op = create_slice_inverse_typed_handle();
    return op.call(self, src, dim, start, end, step);
}

// aten::slice_inverse(Tensor(a) self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
at::Tensor slice_inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    
    static auto op = create_slice_inverse_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, start, end, step);
}

// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slice_scatter::schema> create_slice_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_scatter::name, slice_scatter::overload_name)
      .typed<slice_scatter::schema>();
}

// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_scatter::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    
    static auto op = create_slice_scatter_typed_handle();
    return op.call(self, src, dim, start, end, step);
}

// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    
    static auto op = create_slice_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, start, end, step);
}

// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<select_scatter::schema> create_select_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_scatter::name, select_scatter::overload_name)
      .typed<select_scatter::schema>();
}

// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
at::Tensor select_scatter::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_select_scatter_typed_handle();
    return op.call(self, src, dim, index);
}

// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
at::Tensor select_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_select_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, dim, index);
}

// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_scatter::schema> create_diagonal_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_scatter::name, diagonal_scatter::overload_name)
      .typed<diagonal_scatter::schema>();
}

// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_scatter::call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_diagonal_scatter_typed_handle();
    return op.call(self, src, offset, dim1, dim2);
}

// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_diagonal_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, offset, dim1, dim2);
}

// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_scatter::schema> create_as_strided_scatter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_scatter::name, as_strided_scatter::overload_name)
      .typed<as_strided_scatter::schema>();
}

// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_scatter::call(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_as_strided_scatter_typed_handle();
    return op.call(self, src, size, stride, storage_offset);
}

// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_as_strided_scatter_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, size, stride, storage_offset);
}

// aten::smm(Tensor self, Tensor mat2) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<smm::schema> create_smm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smm::name, smm::overload_name)
      .typed<smm::schema>();
}

// aten::smm(Tensor self, Tensor mat2) -> Tensor
at::Tensor smm::call(const at::Tensor & self, const at::Tensor & mat2) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_smm_typed_handle();
    return op.call(self, mat2);
}

// aten::smm(Tensor self, Tensor mat2) -> Tensor
at::Tensor smm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_smm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2);
}

// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<softmax_int::schema> create_softmax_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softmax_int::name, softmax_int::overload_name)
      .typed<softmax_int::schema>();
}

// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_int::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_softmax_int_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_softmax_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<softmax_int_out::schema> create_softmax_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softmax_int_out::name, softmax_int_out::overload_name)
      .typed<softmax_int_out::schema>();
}

// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softmax_int_out::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_softmax_int_out_typed_handle();
    return op.call(self, dim, dtype, out);
}

// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softmax_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_softmax_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype, out);
}

// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<softmax_Dimname::schema> create_softmax_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softmax_Dimname::name, softmax_Dimname::overload_name)
      .typed<softmax_Dimname::schema>();
}

// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_softmax_Dimname_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_softmax_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_softmax::schema> create__softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax::name, _softmax::overload_name)
      .typed<_softmax::schema>();
}

// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create__softmax_typed_handle();
    return op.call(self, dim, half_to_float);
}

// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create__softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float);
}

// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_softmax_out::schema> create__softmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax_out::name, _softmax_out::overload_name)
      .typed<_softmax_out::schema>();
}

// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create__softmax_out_typed_handle();
    return op.call(self, dim, half_to_float, out);
}

// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create__softmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_softmax_backward_data::schema> create__softmax_backward_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax_backward_data::name, _softmax_backward_data::overload_name)
      .typed<_softmax_backward_data::schema>();
}

// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create__softmax_backward_data_typed_handle();
    return op.call(grad_output, output, dim, input_dtype);
}

// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
at::Tensor _softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create__softmax_backward_data_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype);
}

// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_softmax_backward_data_out::schema> create__softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_softmax_backward_data_out::name, _softmax_backward_data_out::overload_name)
      .typed<_softmax_backward_data_out::schema>();
}

// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create__softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, input_dtype, grad_input);
}

// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create__softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, grad_input);
}

// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_Tensor::schema> create_unsafe_split_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_Tensor::name, unsafe_split_Tensor::overload_name)
      .typed<unsafe_split_Tensor::schema>();
}

// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_Tensor::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_unsafe_split_Tensor_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_unsafe_split_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<split_Tensor::schema> create_split_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_Tensor::name, split_Tensor::overload_name)
      .typed<split_Tensor::schema>();
}

// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_Tensor::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_split_Tensor_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_split_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<split_sizes::schema> create_split_sizes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_sizes::name, split_sizes::overload_name)
      .typed<split_sizes::schema>();
}

// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_sizes::call(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_split_sizes_typed_handle();
    return op.call(self, split_size, dim);
}

// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_sizes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_split_sizes_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim);
}

// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_with_sizes::schema> create_unsafe_split_with_sizes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_with_sizes::name, unsafe_split_with_sizes::overload_name)
      .typed<unsafe_split_with_sizes::schema>();
}

// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_with_sizes::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_unsafe_split_with_sizes_typed_handle();
    return op.call(self, split_sizes, dim);
}

// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unsafe_split_with_sizes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_unsafe_split_with_sizes_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim);
}

// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<split_with_sizes::schema> create_split_with_sizes_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(split_with_sizes::name, split_with_sizes::overload_name)
      .typed<split_with_sizes::schema>();
}

// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_with_sizes::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_split_with_sizes_typed_handle();
    return op.call(self, split_sizes, dim);
}

// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
::std::vector<at::Tensor> split_with_sizes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_split_with_sizes_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim);
}

// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hsplit_int::schema> create_hsplit_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hsplit_int::name, hsplit_int::overload_name)
      .typed<hsplit_int::schema>();
}

// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_int::call(const at::Tensor & self, int64_t sections) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_hsplit_int_typed_handle();
    return op.call(self, sections);
}

// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_hsplit_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, sections);
}

// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hsplit_array::schema> create_hsplit_array_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hsplit_array::name, hsplit_array::overload_name)
      .typed<hsplit_array::schema>();
}

// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_array::call(const at::Tensor & self, at::IntArrayRef indices) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_hsplit_array_typed_handle();
    return op.call(self, indices);
}

// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> hsplit_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_hsplit_array_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices);
}

// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<vsplit_int::schema> create_vsplit_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vsplit_int::name, vsplit_int::overload_name)
      .typed<vsplit_int::schema>();
}

// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_int::call(const at::Tensor & self, int64_t sections) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_vsplit_int_typed_handle();
    return op.call(self, sections);
}

// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_vsplit_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, sections);
}

// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<vsplit_array::schema> create_vsplit_array_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(vsplit_array::name, vsplit_array::overload_name)
      .typed<vsplit_array::schema>();
}

// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_array::call(const at::Tensor & self, at::IntArrayRef indices) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_vsplit_array_typed_handle();
    return op.call(self, indices);
}

// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> vsplit_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_vsplit_array_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices);
}

// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<dsplit_int::schema> create_dsplit_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dsplit_int::name, dsplit_int::overload_name)
      .typed<dsplit_int::schema>();
}

// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_int::call(const at::Tensor & self, int64_t sections) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_dsplit_int_typed_handle();
    return op.call(self, sections);
}

// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_dsplit_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, sections);
}

// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<dsplit_array::schema> create_dsplit_array_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dsplit_array::name, dsplit_array::overload_name)
      .typed<dsplit_array::schema>();
}

// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_array::call(const at::Tensor & self, at::IntArrayRef indices) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_dsplit_array_typed_handle();
    return op.call(self, indices);
}

// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
::std::vector<at::Tensor> dsplit_array::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_dsplit_array_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices);
}

// aten::squeeze(Tensor(a) self) -> Tensor(a)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze::schema> create_squeeze_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze::name, squeeze::overload_name)
      .typed<squeeze::schema>();
}

// aten::squeeze(Tensor(a) self) -> Tensor(a)
at::Tensor squeeze::call(const at::Tensor & self) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze_typed_handle();
    return op.call(self);
}

// aten::squeeze(Tensor(a) self) -> Tensor(a)
at::Tensor squeeze::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_dim::schema> create_squeeze_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_dim::name, squeeze_dim::overload_name)
      .typed<squeeze_dim::schema>();
}

// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor squeeze_dim::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze_dim_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor squeeze_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_dimname::schema> create_squeeze_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_dimname::name, squeeze_dimname::overload_name)
      .typed<squeeze_dimname::schema>();
}

// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
at::Tensor squeeze_dimname::call(const at::Tensor & self, at::Dimname dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze_dimname_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
at::Tensor squeeze_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_dims::schema> create_squeeze_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_dims::name, squeeze_dims::overload_name)
      .typed<squeeze_dims::schema>();
}

// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
at::Tensor squeeze_dims::call(const at::Tensor & self, at::IntArrayRef dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze_dims_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
at::Tensor squeeze_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze_dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_::schema> create_squeeze__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_::name, squeeze_::overload_name)
      .typed<squeeze_::schema>();
}

// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & squeeze_::call(at::Tensor & self) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze__typed_handle();
    return op.call(self);
}

// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & squeeze_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze__dim::schema> create_squeeze__dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze__dim::name, squeeze__dim::overload_name)
      .typed<squeeze__dim::schema>();
}

// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & squeeze__dim::call(at::Tensor & self, int64_t dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze__dim_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & squeeze__dim::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze__dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze__dims::schema> create_squeeze__dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze__dims::name, squeeze__dims::overload_name)
      .typed<squeeze__dims::schema>();
}

// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
at::Tensor & squeeze__dims::call(at::Tensor & self, at::IntArrayRef dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze__dims_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
at::Tensor & squeeze__dims::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::IntArrayRef dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze__dims_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze__dimname::schema> create_squeeze__dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze__dimname::name, squeeze__dimname::overload_name)
      .typed<squeeze__dimname::schema>();
}

// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
at::Tensor & squeeze__dimname::call(at::Tensor & self, at::Dimname dim) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_squeeze__dimname_typed_handle();
    return op.call(self, dim);
}

// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
at::Tensor & squeeze__dimname::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_squeeze__dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sspaddmm::schema> create_sspaddmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sspaddmm::name, sspaddmm::overload_name)
      .typed<sspaddmm::schema>();
}

// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sspaddmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_sspaddmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sspaddmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_sspaddmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Resolve this op's schema in the global dispatcher; findSchemaOrThrow throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sspaddmm_out::schema> create_sspaddmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sspaddmm_out::name, sspaddmm_out::overload_name)
      .typed<sspaddmm_out::schema>();
}

// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sspaddmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    // Handle is resolved once and cached (C++11 thread-safe local static).
    static auto op = create_sspaddmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sspaddmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    // Same cached handle; forwards the caller-provided DispatchKeySet.
    static auto op = create_sspaddmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

// aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_chunk_cat::schema> create__chunk_cat_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_chunk_cat::name, _chunk_cat::overload_name)
      .typed<_chunk_cat::schema>();
}

// aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor
at::Tensor _chunk_cat::call(at::TensorList tensors, int64_t dim, int64_t num_chunks) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create__chunk_cat_typed_handle();
    return handle.call(tensors, dim, num_chunks);
}

// aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor
at::Tensor _chunk_cat::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, int64_t num_chunks) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create__chunk_cat_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, num_chunks);
}

// aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_chunk_cat_out::schema> create__chunk_cat_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_chunk_cat_out::name, _chunk_cat_out::overload_name)
      .typed<_chunk_cat_out::schema>();
}

// aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _chunk_cat_out::call(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create__chunk_cat_out_typed_handle();
    return handle.call(tensors, dim, num_chunks, out);
}

// aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _chunk_cat_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create__chunk_cat_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, num_chunks, out);
}

// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<stack::schema> create_stack_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(stack::name, stack::overload_name)
      .typed<stack::schema>();
}

// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor stack::call(at::TensorList tensors, int64_t dim) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_stack_typed_handle();
    return handle.call(tensors, dim);
}

// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_stack_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim);
}

// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<stack_out::schema> create_stack_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(stack_out::name, stack_out::overload_name)
      .typed<stack_out::schema>();
}

// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & stack_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_stack_out_typed_handle();
    return handle.call(tensors, dim, out);
}

// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_stack_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_stack::schema> create__stack_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_stack::name, _stack::overload_name)
      .typed<_stack::schema>();
}

// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor _stack::call(at::TensorList tensors, int64_t dim) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create__stack_typed_handle();
    return handle.call(tensors, dim);
}

// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
at::Tensor _stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create__stack_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim);
}

// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_stack_out::schema> create__stack_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_stack_out::name, _stack_out::overload_name)
      .typed<_stack_out::schema>();
}

// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _stack_out::call(at::TensorList tensors, int64_t dim, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create__stack_out_typed_handle();
    return handle.call(tensors, dim, out);
}

// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create__stack_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, dim, out);
}

// aten::hstack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hstack::schema> create_hstack_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hstack::name, hstack::overload_name)
      .typed<hstack::schema>();
}

// aten::hstack(Tensor[] tensors) -> Tensor
at::Tensor hstack::call(at::TensorList tensors) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_hstack_typed_handle();
    return handle.call(tensors);
}

// aten::hstack(Tensor[] tensors) -> Tensor
at::Tensor hstack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_hstack_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hstack_out::schema> create_hstack_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(hstack_out::name, hstack_out::overload_name)
      .typed<hstack_out::schema>();
}

// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hstack_out::call(at::TensorList tensors, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_hstack_out_typed_handle();
    return handle.call(tensors, out);
}

// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hstack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_hstack_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, out);
}

// aten::vstack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<vstack::schema> create_vstack_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(vstack::name, vstack::overload_name)
      .typed<vstack::schema>();
}

// aten::vstack(Tensor[] tensors) -> Tensor
at::Tensor vstack::call(at::TensorList tensors) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_vstack_typed_handle();
    return handle.call(tensors);
}

// aten::vstack(Tensor[] tensors) -> Tensor
at::Tensor vstack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_vstack_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<vstack_out::schema> create_vstack_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(vstack_out::name, vstack_out::overload_name)
      .typed<vstack_out::schema>();
}

// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vstack_out::call(at::TensorList tensors, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_vstack_out_typed_handle();
    return handle.call(tensors, out);
}

// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & vstack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_vstack_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, out);
}

// aten::dstack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dstack::schema> create_dstack_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(dstack::name, dstack::overload_name)
      .typed<dstack::schema>();
}

// aten::dstack(Tensor[] tensors) -> Tensor
at::Tensor dstack::call(at::TensorList tensors) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_dstack_typed_handle();
    return handle.call(tensors);
}

// aten::dstack(Tensor[] tensors) -> Tensor
at::Tensor dstack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_dstack_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<dstack_out::schema> create_dstack_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(dstack_out::name, dstack_out::overload_name)
      .typed<dstack_out::schema>();
}

// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dstack_out::call(at::TensorList tensors, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_dstack_out_typed_handle();
    return handle.call(tensors, out);
}

// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dstack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_dstack_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, out);
}

// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<stft::schema> create_stft_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(stft::name, stft::overload_name)
      .typed<stft::schema>();
}

// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor
at::Tensor stft::call(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex, ::std::optional<bool> align_to_window) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_stft_typed_handle();
    return handle.call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex, align_to_window);
}

// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor
at::Tensor stft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex, ::std::optional<bool> align_to_window) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_stft_typed_handle();
    return handle.redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex, align_to_window);
}

// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<stft_center::schema> create_stft_center_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(stft_center::name, stft_center::overload_name)
      .typed<stft_center::schema>();
}

// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor
at::Tensor stft_center::call(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex, ::std::optional<bool> align_to_window) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_stft_center_typed_handle();
    return handle.call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex, align_to_window);
}

// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None, bool? align_to_window=None) -> Tensor
at::Tensor stft_center::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex, ::std::optional<bool> align_to_window) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_stft_center_typed_handle();
    return handle.redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex, align_to_window);
}

// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<istft::schema> create_istft_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(istft::name, istft::overload_name)
      .typed<istft::schema>();
}

// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
at::Tensor istft::call(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_istft_typed_handle();
    return handle.call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}

// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
at::Tensor istft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_istft_typed_handle();
    return handle.redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
}

// aten::stride.int(Tensor self, int dim) -> int
static C10_NOINLINE c10::TypedOperatorHandle<stride_int::schema> create_stride_int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(stride_int::name, stride_int::overload_name)
      .typed<stride_int::schema>();
}

// aten::stride.int(Tensor self, int dim) -> int
int64_t stride_int::call(const at::Tensor & self, int64_t dim) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_stride_int_typed_handle();
    return handle.call(self, dim);
}

// aten::stride.int(Tensor self, int dim) -> int
int64_t stride_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_stride_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::stride.Dimname(Tensor self, Dimname dim) -> int
static C10_NOINLINE c10::TypedOperatorHandle<stride_Dimname::schema> create_stride_Dimname_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(stride_Dimname::name, stride_Dimname::overload_name)
      .typed<stride_Dimname::schema>();
}

// aten::stride.Dimname(Tensor self, Dimname dim) -> int
int64_t stride_Dimname::call(const at::Tensor & self, at::Dimname dim) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_stride_Dimname_typed_handle();
    return handle.call(self, dim);
}

// aten::stride.Dimname(Tensor self, Dimname dim) -> int
int64_t stride_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_stride_Dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::sym_stride.int(Tensor self, int dim) -> SymInt
static C10_NOINLINE c10::TypedOperatorHandle<sym_stride_int::schema> create_sym_stride_int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sym_stride_int::name, sym_stride_int::overload_name)
      .typed<sym_stride_int::schema>();
}

// aten::sym_stride.int(Tensor self, int dim) -> SymInt
c10::SymInt sym_stride_int::call(const at::Tensor & self, int64_t dim) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sym_stride_int_typed_handle();
    return handle.call(self, dim);
}

// aten::sym_stride.int(Tensor self, int dim) -> SymInt
c10::SymInt sym_stride_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sym_stride_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum::schema> create_sum_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum::name, sum::overload_name)
      .typed<sum::schema>();
}

// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sum_typed_handle();
    return handle.call(self, dtype);
}

// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sum_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype);
}

// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum_dim_IntList::schema> create_sum_dim_IntList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_dim_IntList::name, sum_dim_IntList::overload_name)
      .typed<sum_dim_IntList::schema>();
}

// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_IntList::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sum_dim_IntList_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_IntList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sum_dim_IntList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum_dim_DimnameList::schema> create_sum_dim_DimnameList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_dim_DimnameList::name, sum_dim_DimnameList::overload_name)
      .typed<sum_dim_DimnameList::schema>();
}

// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_DimnameList::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sum_dim_DimnameList_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor sum_dim_DimnameList::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sum_dim_DimnameList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sum_IntList_out::schema> create_sum_IntList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_IntList_out::name, sum_IntList_out::overload_name)
      .typed<sum_IntList_out::schema>();
}

// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_IntList_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sum_IntList_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_IntList_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sum_IntList_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sum_DimnameList_out::schema> create_sum_DimnameList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_DimnameList_out::name, sum_DimnameList_out::overload_name)
      .typed<sum_DimnameList_out::schema>();
}

// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_DimnameList_out::call(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sum_DimnameList_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sum_DimnameList_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sum_DimnameList_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_sum_backward::schema> create__nested_sum_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_sum_backward::name, _nested_sum_backward::overload_name)
      .typed<_nested_sum_backward::schema>();
}

// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
at::Tensor _nested_sum_backward::call(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create__nested_sum_backward_typed_handle();
    return handle.call(grad, self, dim, keepdim);
}

// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
at::Tensor _nested_sum_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create__nested_sum_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, self, dim, keepdim);
}

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nansum::schema> create_nansum_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nansum::name, nansum::overload_name)
      .typed<nansum::schema>();
}

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nansum::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_nansum_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor nansum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_nansum_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nansum_out::schema> create_nansum_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nansum_out::name, nansum_out::overload_name)
      .typed<nansum_out::schema>();
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nansum_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_nansum_out_typed_handle();
    return handle.call(self, dim, keepdim, dtype, out);
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nansum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_nansum_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sum_to_size::schema> create_sum_to_size_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sum_to_size::name, sum_to_size::overload_name)
      .typed<sum_to_size::schema>();
}

// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor
at::Tensor sum_to_size::call(const at::Tensor & self, c10::SymIntArrayRef size) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sum_to_size_typed_handle();
    return handle.call(self, size);
}

// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor
at::Tensor sum_to_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sum_to_size_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size);
}

// aten::sqrt(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sqrt::schema> create_sqrt_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sqrt::name, sqrt::overload_name)
      .typed<sqrt::schema>();
}

// aten::sqrt(Tensor self) -> Tensor
at::Tensor sqrt::call(const at::Tensor & self) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sqrt_typed_handle();
    return handle.call(self);
}

// aten::sqrt(Tensor self) -> Tensor
at::Tensor sqrt::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sqrt_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sqrt_::schema> create_sqrt__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sqrt_::name, sqrt_::overload_name)
      .typed<sqrt_::schema>();
}

// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sqrt_::call(at::Tensor & self) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sqrt__typed_handle();
    return handle.call(self);
}

// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sqrt__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sqrt_out::schema> create_sqrt_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sqrt_out::name, sqrt_out::overload_name)
      .typed<sqrt_out::schema>();
}

// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sqrt_out::call(const at::Tensor & self, at::Tensor & out) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_sqrt_out_typed_handle();
    return handle.call(self, out);
}

// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_sqrt_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::square(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<square::schema> create_square_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(square::name, square::overload_name)
      .typed<square::schema>();
}

// aten::square(Tensor self) -> Tensor
at::Tensor square::call(const at::Tensor & self) {
    // Resolve the typed operator handle once; it is cached for all later calls.
    static const auto handle = create_square_typed_handle();
    return handle.call(self);
}

// aten::square(Tensor self) -> Tensor
at::Tensor square::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter the dispatcher with an explicit key set.
    static const auto handle = create_square_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::square_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<square_::schema> create_square__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(square_::name, square_::overload_name)
      .typed<square_::schema>();
}

// aten::square_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & square_::call(at::Tensor & self) {
    
    static auto op = create_square__typed_handle();
    return op.call(self);
}

// aten::square_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & square_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_square__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<square_out::schema> create_square_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(square_out::name, square_out::overload_name)
      .typed<square_out::schema>();
}

// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & square_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_square_out_typed_handle();
    return op.call(self, out);
}

// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & square_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_square_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// Generated dispatcher stubs (torchgen) for aten::std overloads: each
// create_* helper resolves the operator in the c10::Dispatcher once; call()
// and redispatch() forward through a function-local cached handle.
// NOTE: the operator class here is literally named `std`, shadowing the
// standard-library namespace in this scope — which is why the generator
// spells library types fully qualified as `::std::optional` below.
// aten::std(Tensor self, bool unbiased=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std::schema> create_std_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std::name, std::overload_name)
      .typed<std::schema>();
}

// aten::std(Tensor self, bool unbiased=True) -> Tensor
at::Tensor std::call(const at::Tensor & self, bool unbiased) {
    
    static auto op = create_std_typed_handle();
    return op.call(self, unbiased);
}

// aten::std(Tensor self, bool unbiased=True) -> Tensor
at::Tensor std::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    
    static auto op = create_std_typed_handle();
    return op.redispatch(dispatchKeySet, self, unbiased);
}

// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std_dim::schema> create_std_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_dim::name, std_dim::overload_name)
      .typed<std_dim::schema>();
}

// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor std_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor std_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std_correction::schema> create_std_correction_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_correction::name, std_correction::overload_name)
      .typed<std_correction::schema>();
}

// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor std_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_correction_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor std_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_correction_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// Generated dispatcher stubs (torchgen) for aten::std_mean overloads
// (returns a (std, mean) tensor pair). Each create_* helper resolves the
// operator in the c10::Dispatcher once; call()/redispatch() forward through
// a function-local cached handle (thread-safe C++11 static init).
// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean::schema> create_std_mean_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean::name, std_mean::overload_name)
      .typed<std_mean::schema>();
}

// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean::call(const at::Tensor & self, bool unbiased) {
    
    static auto op = create_std_mean_typed_handle();
    return op.call(self, unbiased);
}

// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    
    static auto op = create_std_mean_typed_handle();
    return op.redispatch(dispatchKeySet, self, unbiased);
}

// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_dim::schema> create_std_mean_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_dim::name, std_mean_dim::overload_name)
      .typed<std_mean_dim::schema>();
}

// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_mean_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_mean_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_correction::schema> create_std_mean_correction_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_correction::name, std_mean_correction::overload_name)
      .typed<std_mean_correction::schema>();
}

// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_mean_correction_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_mean_correction_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_names_dim::schema> create_std_mean_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_names_dim::name, std_mean_names_dim::overload_name)
      .typed<std_mean_names_dim::schema>();
}

// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_mean_names_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_mean_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_correction_names::schema> create_std_mean_correction_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_correction_names::name, std_mean_correction_names::overload_name)
      .typed<std_mean_correction_names::schema>();
}

// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_mean_correction_names_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_mean_correction_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// Generated dispatcher stubs (torchgen) for the out-parameter and named-dim
// variants of aten::std. Same pattern as the rest of the file: a run-once
// schema lookup cached in a function-local static handle, with call() and
// redispatch() forwarding the arguments unchanged.
// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<std_out::schema> create_std_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_out::name, std_out::overload_name)
      .typed<std_out::schema>();
}

// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_out_typed_handle();
    return op.call(self, dim, unbiased, keepdim, out);
}

// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
}

// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<std_correction_out::schema> create_std_correction_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_correction_out::name, std_correction_out::overload_name)
      .typed<std_correction_out::schema>();
}

// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_correction_out_typed_handle();
    return op.call(self, dim, correction, keepdim, out);
}

// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_correction_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
}

// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std_names_dim::schema> create_std_names_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_names_dim::name, std_names_dim::overload_name)
      .typed<std_names_dim::schema>();
}

// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor std_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_names_dim_typed_handle();
    return op.call(self, dim, unbiased, keepdim);
}

// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor std_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
    
    static auto op = create_std_names_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<std_names_out::schema> create_std_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_names_out::name, std_names_out::overload_name)
      .typed<std_names_out::schema>();
}

// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_names_out::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_names_out_typed_handle();
    return op.call(self, dim, unbiased, keepdim, out);
}

// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
}

// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<std_correction_names::schema> create_std_correction_names_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_correction_names::name, std_correction_names::overload_name)
      .typed<std_correction_names::schema>();
}

// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor std_correction_names::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_correction_names_typed_handle();
    return op.call(self, dim, correction, keepdim);
}

// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor std_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    
    static auto op = create_std_correction_names_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<std_correction_names_out::schema> create_std_correction_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_correction_names_out::name, std_correction_names_out::overload_name)
      .typed<std_correction_names_out::schema>();
}

// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_correction_names_out::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_correction_names_out_typed_handle();
    return op.call(self, dim, correction, keepdim, out);
}

// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & std_correction_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
    
    static auto op = create_std_correction_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
}

// Generated dispatcher stubs (torchgen) for aten::prod overloads. Each
// create_* helper resolves the operator in the c10::Dispatcher once;
// call()/redispatch() forward through a function-local cached handle.
// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prod::schema> create_prod_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod::name, prod::overload_name)
      .typed<prod::schema>();
}

// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_prod_typed_handle();
    return op.call(self, dtype);
}

// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_prod_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prod_dim_int::schema> create_prod_dim_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_dim_int::name, prod_dim_int::overload_name)
      .typed<prod_dim_int::schema>();
}

// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_int::call(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_prod_dim_int_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_prod_dim_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<prod_int_out::schema> create_prod_int_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_int_out::name, prod_int_out::overload_name)
      .typed<prod_int_out::schema>();
}

// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_int_out::call(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_int_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_int_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<prod_dim_Dimname::schema> create_prod_dim_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_dim_Dimname::name, prod_dim_Dimname::overload_name)
      .typed<prod_dim_Dimname::schema>();
}

// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_Dimname::call(const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_prod_dim_Dimname_typed_handle();
    return op.call(self, dim, keepdim, dtype);
}

// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor prod_dim_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_prod_dim_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<prod_Dimname_out::schema> create_prod_Dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_Dimname_out::name, prod_Dimname_out::overload_name)
      .typed<prod_Dimname_out::schema>();
}

// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_Dimname_out::call(const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_Dimname_out_typed_handle();
    return op.call(self, dim, keepdim, dtype, out);
}

// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & prod_Dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_Dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// Generated dispatcher stubs (torchgen) for aten::t (view, per the (a)
// alias annotation in the schema) and aten::t_ (in-place, (a!)). Same
// pattern as the rest of the file: run-once schema lookup cached in a
// function-local static handle; call()/redispatch() forward unchanged.
// aten::t(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<t::schema> create_t_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(t::name, t::overload_name)
      .typed<t::schema>();
}

// aten::t(Tensor(a) self) -> Tensor(a)
at::Tensor t::call(const at::Tensor & self) {
    
    static auto op = create_t_typed_handle();
    return op.call(self);
}

// aten::t(Tensor(a) self) -> Tensor(a)
at::Tensor t::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_t_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::t_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<t_::schema> create_t__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(t_::name, t_::overload_name)
      .typed<t_::schema>();
}

// aten::t_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & t_::call(at::Tensor & self) {
    
    static auto op = create_t__typed_handle();
    return op.call(self);
}

// aten::t_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & t_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_t__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// Generated dispatcher stubs (torchgen) for aten::tan / aten::tanh and
// their in-place (_) and out-parameter (.out) variants. Same pattern as the
// rest of the file: run-once schema lookup cached in a function-local
// static handle; call()/redispatch() forward arguments unchanged.
// aten::tan(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tan::schema> create_tan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tan::name, tan::overload_name)
      .typed<tan::schema>();
}

// aten::tan(Tensor self) -> Tensor
at::Tensor tan::call(const at::Tensor & self) {
    
    static auto op = create_tan_typed_handle();
    return op.call(self);
}

// aten::tan(Tensor self) -> Tensor
at::Tensor tan::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_tan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::tan_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tan_::schema> create_tan__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tan_::name, tan_::overload_name)
      .typed<tan_::schema>();
}

// aten::tan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tan_::call(at::Tensor & self) {
    
    static auto op = create_tan__typed_handle();
    return op.call(self);
}

// aten::tan_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_tan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tan_out::schema> create_tan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tan_out::name, tan_out::overload_name)
      .typed<tan_out::schema>();
}

// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tan_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_tan_out_typed_handle();
    return op.call(self, out);
}

// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_tan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::tanh(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tanh::schema> create_tanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh::name, tanh::overload_name)
      .typed<tanh::schema>();
}

// aten::tanh(Tensor self) -> Tensor
at::Tensor tanh::call(const at::Tensor & self) {
    
    static auto op = create_tanh_typed_handle();
    return op.call(self);
}

// aten::tanh(Tensor self) -> Tensor
at::Tensor tanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_tanh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tanh_::schema> create_tanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_::name, tanh_::overload_name)
      .typed<tanh_::schema>();
}

// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tanh_::call(at::Tensor & self) {
    
    static auto op = create_tanh__typed_handle();
    return op.call(self);
}

// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & tanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_tanh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tanh_out::schema> create_tanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_out::name, tanh_out::overload_name)
      .typed<tanh_out::schema>();
}

// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tanh_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_tanh_out_typed_handle();
    return op.call(self, out);
}

// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_tanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// Generated dispatcher stubs (torchgen) for aten::tensordot and its .out
// variant. Same pattern as the rest of the file: run-once schema lookup
// cached in a function-local static handle; call()/redispatch() forward
// arguments unchanged.
// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tensordot::schema> create_tensordot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensordot::name, tensordot::overload_name)
      .typed<tensordot::schema>();
}

// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
at::Tensor tensordot::call(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
    
    static auto op = create_tensordot_typed_handle();
    return op.call(self, other, dims_self, dims_other);
}

// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
at::Tensor tensordot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
    
    static auto op = create_tensordot_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dims_self, dims_other);
}

// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tensordot_out::schema> create_tensordot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tensordot_out::name, tensordot_out::overload_name)
      .typed<tensordot_out::schema>();
}

// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tensordot_out::call(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
    
    static auto op = create_tensordot_out_typed_handle();
    return op.call(self, other, dims_self, dims_other, out);
}

// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tensordot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
    
    static auto op = create_tensordot_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dims_self, dims_other, out);
}

// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<threshold::schema> create_threshold_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(threshold::name, threshold::overload_name)
                   .typed<threshold::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor threshold::call(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
    static auto handle = create_threshold_typed_handle();
    return handle.call(self, threshold, value);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor threshold::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
    static auto handle = create_threshold_typed_handle();
    return handle.redispatch(dispatchKeySet, self, threshold, value);
}

// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<threshold_::schema> create_threshold__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(threshold_::name, threshold_::overload_name)
                   .typed<threshold_::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & threshold_::call(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
    static auto handle = create_threshold__typed_handle();
    return handle.call(self, threshold, value);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor & threshold_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
    static auto handle = create_threshold__typed_handle();
    return handle.redispatch(dispatchKeySet, self, threshold, value);
}

// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<threshold_out::schema> create_threshold_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(threshold_out::name, threshold_out::overload_name)
                   .typed<threshold_out::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & threshold_out::call(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
    static auto handle = create_threshold_out_typed_handle();
    return handle.call(self, threshold, value, out);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor & threshold_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
    static auto handle = create_threshold_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, threshold, value, out);
}

// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<threshold_backward_grad_input::schema> create_threshold_backward_grad_input_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(threshold_backward_grad_input::name, threshold_backward_grad_input::overload_name)
                   .typed<threshold_backward_grad_input::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & threshold_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
    static auto handle = create_threshold_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, threshold, grad_input);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor & threshold_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
    static auto handle = create_threshold_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, threshold, grad_input);
}

// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<threshold_backward::schema> create_threshold_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(threshold_backward::name, threshold_backward::overload_name)
                   .typed<threshold_backward::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor threshold_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
    static auto handle = create_threshold_backward_typed_handle();
    return handle.call(grad_output, self, threshold);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor threshold_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
    static auto handle = create_threshold_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, threshold);
}

// aten::tile(Tensor self, SymInt[] dims) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<tile::schema> create_tile_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(tile::name, tile::overload_name)
                   .typed<tile::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor tile::call(const at::Tensor & self, c10::SymIntArrayRef dims) {
    static auto handle = create_tile_typed_handle();
    return handle.call(self, dims);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor tile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dims) {
    static auto handle = create_tile_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dims);
}

// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<transpose_int::schema> create_transpose_int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(transpose_int::name, transpose_int::overload_name)
                   .typed<transpose_int::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor transpose_int::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create_transpose_int_typed_handle();
    return handle.call(self, dim0, dim1);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor transpose_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create_transpose_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<transpose_Dimname::schema> create_transpose_Dimname_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(transpose_Dimname::name, transpose_Dimname::overload_name)
                   .typed<transpose_Dimname::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor transpose_Dimname::call(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
    static auto handle = create_transpose_Dimname_typed_handle();
    return handle.call(self, dim0, dim1);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor transpose_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
    static auto handle = create_transpose_Dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_transpose::schema> create__mkldnn_transpose_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_mkldnn_transpose::name, _mkldnn_transpose::overload_name)
                   .typed<_mkldnn_transpose::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _mkldnn_transpose::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create__mkldnn_transpose_typed_handle();
    return handle.call(self, dim0, dim1);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _mkldnn_transpose::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create__mkldnn_transpose_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<transpose_::schema> create_transpose__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(transpose_::name, transpose_::overload_name)
                   .typed<transpose_::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & transpose_::call(at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create_transpose__typed_handle();
    return handle.call(self, dim0, dim1);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor & transpose_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create_transpose__typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_transpose_::schema> create__mkldnn_transpose__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_mkldnn_transpose_::name, _mkldnn_transpose_::overload_name)
                   .typed<_mkldnn_transpose_::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor & _mkldnn_transpose_::call(at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create__mkldnn_transpose__typed_handle();
    return handle.call(self, dim0, dim1);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor & _mkldnn_transpose_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto handle = create__mkldnn_transpose__typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<one_hot::schema> create_one_hot_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(one_hot::name, one_hot::overload_name)
                   .typed<one_hot::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor one_hot::call(const at::Tensor & self, int64_t num_classes) {
    static auto handle = create_one_hot_typed_handle();
    return handle.call(self, num_classes);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor one_hot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_classes) {
    static auto handle = create_one_hot_typed_handle();
    return handle.redispatch(dispatchKeySet, self, num_classes);
}

// aten::flip(Tensor self, int[] dims) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<flip::schema> create_flip_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(flip::name, flip::overload_name)
                   .typed<flip::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor flip::call(const at::Tensor & self, at::IntArrayRef dims) {
    static auto handle = create_flip_typed_handle();
    return handle.call(self, dims);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor flip::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
    static auto handle = create_flip_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dims);
}

// aten::fliplr(Tensor self) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<fliplr::schema> create_fliplr_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fliplr::name, fliplr::overload_name)
                   .typed<fliplr::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor fliplr::call(const at::Tensor & self) {
    static auto handle = create_fliplr_typed_handle();
    return handle.call(self);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor fliplr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_fliplr_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::flipud(Tensor self) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<flipud::schema> create_flipud_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(flipud::name, flipud::overload_name)
                   .typed<flipud::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor flipud::call(const at::Tensor & self) {
    static auto handle = create_flipud_typed_handle();
    return handle.call(self);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor flipud::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_flipud_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<roll::schema> create_roll_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(roll::name, roll::overload_name)
                   .typed<roll::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor roll::call(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) {
    static auto handle = create_roll_typed_handle();
    return handle.call(self, shifts, dims);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor roll::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) {
    static auto handle = create_roll_typed_handle();
    return handle.redispatch(dispatchKeySet, self, shifts, dims);
}

// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<rot90::schema> create_rot90_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rot90::name, rot90::overload_name)
                   .typed<rot90::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor rot90::call(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
    static auto handle = create_rot90_typed_handle();
    return handle.call(self, k, dims);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor rot90::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
    static auto handle = create_rot90_typed_handle();
    return handle.redispatch(dispatchKeySet, self, k, dims);
}

// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<trapezoid_x::schema> create_trapezoid_x_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trapezoid_x::name, trapezoid_x::overload_name)
                   .typed<trapezoid_x::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor trapezoid_x::call(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto handle = create_trapezoid_x_typed_handle();
    return handle.call(y, x, dim);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor trapezoid_x::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto handle = create_trapezoid_x_typed_handle();
    return handle.redispatch(dispatchKeySet, y, x, dim);
}

// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<trapezoid_dx::schema> create_trapezoid_dx_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trapezoid_dx::name, trapezoid_dx::overload_name)
                   .typed<trapezoid_dx::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor trapezoid_dx::call(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    static auto handle = create_trapezoid_dx_typed_handle();
    return handle.call(y, dx, dim);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor trapezoid_dx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
    static auto handle = create_trapezoid_dx_typed_handle();
    return handle.redispatch(dispatchKeySet, y, dx, dim);
}

// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<trapz_x::schema> create_trapz_x_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trapz_x::name, trapz_x::overload_name)
                   .typed<trapz_x::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor trapz_x::call(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto handle = create_trapz_x_typed_handle();
    return handle.call(y, x, dim);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor trapz_x::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim) {
    static auto handle = create_trapz_x_typed_handle();
    return handle.redispatch(dispatchKeySet, y, x, dim);
}

// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<trapz_dx::schema> create_trapz_dx_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trapz_dx::name, trapz_dx::overload_name)
                   .typed<trapz_dx::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor trapz_dx::call(const at::Tensor & y, double dx, int64_t dim) {
    static auto handle = create_trapz_dx_typed_handle();
    return handle.call(y, dx, dim);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor trapz_dx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, double dx, int64_t dim) {
    static auto handle = create_trapz_dx_typed_handle();
    return handle.redispatch(dispatchKeySet, y, dx, dim);
}

// aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_transform_bias_rescale_qkv::schema> create__transform_bias_rescale_qkv_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_transform_bias_rescale_qkv::name, _transform_bias_rescale_qkv::overload_name)
                   .typed<_transform_bias_rescale_qkv::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv::call(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
    static auto handle = create__transform_bias_rescale_qkv_typed_handle();
    return handle.call(qkv, qkv_bias, num_heads);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
    static auto handle = create__transform_bias_rescale_qkv_typed_handle();
    return handle.redispatch(dispatchKeySet, qkv, qkv_bias, num_heads);
}

// aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_mask::schema> create__nested_tensor_from_mask_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_from_mask::name, _nested_tensor_from_mask::overload_name)
                   .typed<_nested_tensor_from_mask::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_tensor_from_mask::call(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
    static auto handle = create__nested_tensor_from_mask_typed_handle();
    return handle.call(t, mask, mask_check);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_tensor_from_mask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
    static auto handle = create__nested_tensor_from_mask_typed_handle();
    return handle.redispatch(dispatchKeySet, t, mask, mask_check);
}

// aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_mask_left_aligned::schema> create__nested_tensor_from_mask_left_aligned_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_from_mask_left_aligned::name, _nested_tensor_from_mask_left_aligned::overload_name)
                   .typed<_nested_tensor_from_mask_left_aligned::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
bool _nested_tensor_from_mask_left_aligned::call(const at::Tensor & t, const at::Tensor & mask) {
    static auto handle = create__nested_tensor_from_mask_left_aligned_typed_handle();
    return handle.call(t, mask);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
bool _nested_tensor_from_mask_left_aligned::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask) {
    static auto handle = create__nested_tensor_from_mask_left_aligned_typed_handle();
    return handle.redispatch(dispatchKeySet, t, mask);
}

// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded::schema> create__nested_from_padded_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_from_padded::name, _nested_from_padded::overload_name)
                   .typed<_nested_from_padded::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_from_padded::call(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
    static auto handle = create__nested_from_padded_typed_handle();
    return handle.call(padded, cpu_nested_shape_example, fuse_transform_0213);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_from_padded::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
    static auto handle = create__nested_from_padded_typed_handle();
    return handle.redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213);
}

// aten::_nested_tensor_size(Tensor self) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_size::schema> create__nested_tensor_size_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_size::name, _nested_tensor_size::overload_name)
                   .typed<_nested_tensor_size::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_tensor_size::call(const at::Tensor & self) {
    static auto handle = create__nested_tensor_size_typed_handle();
    return handle.call(self);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_tensor_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_tensor_size_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_tensor_strides(Tensor self) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_strides::schema> create__nested_tensor_strides_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_strides::name, _nested_tensor_strides::overload_name)
                   .typed<_nested_tensor_strides::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_tensor_strides::call(const at::Tensor & self) {
    static auto handle = create__nested_tensor_strides_typed_handle();
    return handle.call(self);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_tensor_strides::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_tensor_strides_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_storage_offsets::schema> create__nested_tensor_storage_offsets_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_storage_offsets::name, _nested_tensor_storage_offsets::overload_name)
                   .typed<_nested_tensor_storage_offsets::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_tensor_storage_offsets::call(const at::Tensor & self) {
    static auto handle = create__nested_tensor_storage_offsets_typed_handle();
    return handle.call(self);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_tensor_storage_offsets::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__nested_tensor_storage_offsets_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_and_nested_example::schema> create__nested_from_padded_and_nested_example_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_from_padded_and_nested_example::name, _nested_from_padded_and_nested_example::overload_name)
                   .typed<_nested_from_padded_and_nested_example::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_from_padded_and_nested_example::call(const at::Tensor & padded, const at::Tensor & nt_example) {
    static auto handle = create__nested_from_padded_and_nested_example_typed_handle();
    return handle.call(padded, nt_example);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_from_padded_and_nested_example::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example) {
    static auto handle = create__nested_from_padded_and_nested_example_typed_handle();
    return handle.redispatch(dispatchKeySet, padded, nt_example);
}

// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor(a)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_buffer::schema> create__nested_view_from_buffer_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_view_from_buffer::name, _nested_view_from_buffer::overload_name)
                   .typed<_nested_view_from_buffer::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_view_from_buffer::call(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
    static auto handle = create__nested_view_from_buffer_typed_handle();
    return handle.call(self, nested_size, nested_strides, offsets);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_view_from_buffer::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
    static auto handle = create__nested_view_from_buffer_typed_handle();
    return handle.redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets);
}

// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_buffer_copy::schema> create__nested_view_from_buffer_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_view_from_buffer_copy::name, _nested_view_from_buffer_copy::overload_name)
                   .typed<_nested_view_from_buffer_copy::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_view_from_buffer_copy::call(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
    static auto handle = create__nested_view_from_buffer_copy_typed_handle();
    return handle.call(self, nested_size, nested_strides, offsets);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_view_from_buffer_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
    static auto handle = create__nested_view_from_buffer_copy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets);
}

// aten::_nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor(a)
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_jagged::schema> create__nested_view_from_jagged_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_view_from_jagged::name, _nested_view_from_jagged::overload_name)
                   .typed<_nested_view_from_jagged::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_view_from_jagged::call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
    static auto handle = create__nested_view_from_jagged_typed_handle();
    return handle.call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_view_from_jagged::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
    static auto handle = create__nested_view_from_jagged_typed_handle();
    return handle.redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
}

// aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor
// Resolves the dispatcher entry for this operator and type-checks it against the compile-time schema.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_jagged_copy::schema> create__nested_view_from_jagged_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_view_from_jagged_copy::name, _nested_view_from_jagged_copy::overload_name)
                   .typed<_nested_view_from_jagged_copy::schema>();
}

// Entry point: forwards through a handle cached in a function-local static.
at::Tensor _nested_view_from_jagged_copy::call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
    static auto handle = create__nested_view_from_jagged_copy_typed_handle();
    return handle.call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
}

// Like call(), but re-enters dispatch with an explicit DispatchKeySet.
at::Tensor _nested_view_from_jagged_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
    static auto handle = create__nested_view_from_jagged_copy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
}

// aten::_nested_get_values(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_values::schema> create__nested_get_values_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_values::name, _nested_get_values::overload_name).typed<_nested_get_values::schema>();
}

// aten::_nested_get_values(Tensor(a) self) -> Tensor(a)
at::Tensor _nested_get_values::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_values_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_values(Tensor(a) self) -> Tensor(a)
at::Tensor _nested_get_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_values_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_values_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_values_copy::schema> create__nested_get_values_copy_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_values_copy::name, _nested_get_values_copy::overload_name).typed<_nested_get_values_copy::schema>();
}

// aten::_nested_get_values_copy(Tensor self) -> Tensor
at::Tensor _nested_get_values_copy::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_values_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_values_copy(Tensor self) -> Tensor
at::Tensor _nested_get_values_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_values_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_offsets(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_offsets::schema> create__nested_get_offsets_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_offsets::name, _nested_get_offsets::overload_name).typed<_nested_get_offsets::schema>();
}

// aten::_nested_get_offsets(Tensor self) -> Tensor
at::Tensor _nested_get_offsets::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_offsets_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_offsets(Tensor self) -> Tensor
at::Tensor _nested_get_offsets::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_offsets_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_lengths(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_lengths::schema> create__nested_get_lengths_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_lengths::name, _nested_get_lengths::overload_name).typed<_nested_get_lengths::schema>();
}

// aten::_nested_get_lengths(Tensor self) -> Tensor
at::Tensor _nested_get_lengths::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_lengths_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_lengths(Tensor self) -> Tensor
at::Tensor _nested_get_lengths::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_lengths_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_ragged_idx(Tensor self) -> int
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_ragged_idx::schema> create__nested_get_ragged_idx_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_ragged_idx::name, _nested_get_ragged_idx::overload_name).typed<_nested_get_ragged_idx::schema>();
}

// aten::_nested_get_ragged_idx(Tensor self) -> int
int64_t _nested_get_ragged_idx::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_ragged_idx_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_ragged_idx(Tensor self) -> int
int64_t _nested_get_ragged_idx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_ragged_idx_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_min_seqlen(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_min_seqlen::schema> create__nested_get_min_seqlen_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_min_seqlen::name, _nested_get_min_seqlen::overload_name).typed<_nested_get_min_seqlen::schema>();
}

// aten::_nested_get_min_seqlen(Tensor self) -> Tensor
at::Tensor _nested_get_min_seqlen::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_min_seqlen_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_min_seqlen(Tensor self) -> Tensor
at::Tensor _nested_get_min_seqlen::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_min_seqlen_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_max_seqlen(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_max_seqlen::schema> create__nested_get_max_seqlen_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_max_seqlen::name, _nested_get_max_seqlen::overload_name).typed<_nested_get_max_seqlen::schema>();
}

// aten::_nested_get_max_seqlen(Tensor self) -> Tensor
at::Tensor _nested_get_max_seqlen::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_max_seqlen_typed_handle();
    return typed_handle.call(self);
}

// aten::_nested_get_max_seqlen(Tensor self) -> Tensor
at::Tensor _nested_get_max_seqlen::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create__nested_get_max_seqlen_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_nested_get_jagged_dummy(Tensor any) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_jagged_dummy::schema> create__nested_get_jagged_dummy_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_get_jagged_dummy::name, _nested_get_jagged_dummy::overload_name).typed<_nested_get_jagged_dummy::schema>();
}

// aten::_nested_get_jagged_dummy(Tensor any) -> Tensor
at::Tensor _nested_get_jagged_dummy::call(const at::Tensor & any) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_get_jagged_dummy_typed_handle();
    return typed_handle.call(any);
}

// aten::_nested_get_jagged_dummy(Tensor any) -> Tensor
at::Tensor _nested_get_jagged_dummy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & any) {
    static auto typed_handle = create__nested_get_jagged_dummy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, any);
}

// aten::_nested_compute_contiguous_strides_offsets(Tensor nested_size) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_nested_compute_contiguous_strides_offsets::schema> create__nested_compute_contiguous_strides_offsets_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_compute_contiguous_strides_offsets::name, _nested_compute_contiguous_strides_offsets::overload_name).typed<_nested_compute_contiguous_strides_offsets::schema>();
}

// aten::_nested_compute_contiguous_strides_offsets(Tensor nested_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _nested_compute_contiguous_strides_offsets::call(const at::Tensor & nested_size) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__nested_compute_contiguous_strides_offsets_typed_handle();
    return typed_handle.call(nested_size);
}

// aten::_nested_compute_contiguous_strides_offsets(Tensor nested_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _nested_compute_contiguous_strides_offsets::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & nested_size) {
    static auto typed_handle = create__nested_compute_contiguous_strides_offsets_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, nested_size);
}

// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_trilinear::schema> create__trilinear_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_trilinear::name, _trilinear::overload_name).typed<_trilinear::schema>();
}

// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
at::Tensor _trilinear::call(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__trilinear_typed_handle();
    return typed_handle.call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
}

// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
at::Tensor _trilinear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
    static auto typed_handle = create__trilinear_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
}

// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<triplet_margin_loss::schema> create_triplet_margin_loss_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(triplet_margin_loss::name, triplet_margin_loss::overload_name).typed<triplet_margin_loss::schema>();
}

// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
at::Tensor triplet_margin_loss::call(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_triplet_margin_loss_typed_handle();
    return typed_handle.call(anchor, positive, negative, margin, p, eps, swap, reduction);
}

// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
at::Tensor triplet_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
    static auto typed_handle = create_triplet_margin_loss_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, anchor, positive, negative, margin, p, eps, swap, reduction);
}

// aten::trunc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trunc::schema> create_trunc_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trunc::name, trunc::overload_name).typed<trunc::schema>();
}

// aten::trunc(Tensor self) -> Tensor
at::Tensor trunc::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_trunc_typed_handle();
    return typed_handle.call(self);
}

// aten::trunc(Tensor self) -> Tensor
at::Tensor trunc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create_trunc_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<trunc_::schema> create_trunc__typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trunc_::name, trunc_::overload_name).typed<trunc_::schema>();
}

// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & trunc_::call(at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_trunc__typed_handle();
    return typed_handle.call(self);
}

// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & trunc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto typed_handle = create_trunc__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<trunc_out::schema> create_trunc_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(trunc_out::name, trunc_out::overload_name).typed<trunc_out::schema>();
}

// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trunc_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_trunc_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trunc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto typed_handle = create_trunc_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::fix(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fix::schema> create_fix_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fix::name, fix::overload_name).typed<fix::schema>();
}

// aten::fix(Tensor self) -> Tensor
at::Tensor fix::call(const at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_fix_typed_handle();
    return typed_handle.call(self);
}

// aten::fix(Tensor self) -> Tensor
at::Tensor fix::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create_fix_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::fix_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fix_::schema> create_fix__typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fix_::name, fix_::overload_name).typed<fix_::schema>();
}

// aten::fix_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & fix_::call(at::Tensor & self) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_fix__typed_handle();
    return typed_handle.call(self);
}

// aten::fix_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & fix_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto typed_handle = create_fix__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fix_out::schema> create_fix_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fix_out::name, fix_out::overload_name).typed<fix_out::schema>();
}

// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fix_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_fix_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fix_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto typed_handle = create_fix_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::type_as(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<type_as::schema> create_type_as_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(type_as::name, type_as::overload_name).typed<type_as::schema>();
}

// aten::type_as(Tensor self, Tensor other) -> Tensor
at::Tensor type_as::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_type_as_typed_handle();
    return typed_handle.call(self, other);
}

// aten::type_as(Tensor self, Tensor other) -> Tensor
at::Tensor type_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto typed_handle = create_type_as_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<_has_compatible_shallow_copy_type::schema> create__has_compatible_shallow_copy_type_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_has_compatible_shallow_copy_type::name, _has_compatible_shallow_copy_type::overload_name).typed<_has_compatible_shallow_copy_type::schema>();
}

// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
bool _has_compatible_shallow_copy_type::call(const at::Tensor & self, const at::Tensor & from) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__has_compatible_shallow_copy_type_typed_handle();
    return typed_handle.call(self, from);
}

// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
bool _has_compatible_shallow_copy_type::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & from) {
    static auto typed_handle = create__has_compatible_shallow_copy_type_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, from);
}

// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_unique::schema> create__unique_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_unique::name, _unique::overload_name).typed<_unique::schema>();
}

// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _unique::call(const at::Tensor & self, bool sorted, bool return_inverse) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__unique_typed_handle();
    return typed_handle.call(self, sorted, return_inverse);
}

// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _unique::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse) {
    static auto typed_handle = create__unique_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, sorted, return_inverse);
}

// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim::schema> create_unique_dim_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unique_dim::name, unique_dim::overload_name).typed<unique_dim::schema>();
}

// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim::call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_unique_dim_typed_handle();
    return typed_handle.call(self, dim, sorted, return_inverse, return_counts);
}

// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
    static auto typed_handle = create_unique_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts);
}

// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<unique_consecutive::schema> create_unique_consecutive_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unique_consecutive::name, unique_consecutive::overload_name).typed<unique_consecutive::schema>();
}

// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive::call(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_unique_consecutive_typed_handle();
    return typed_handle.call(self, return_inverse, return_counts, dim);
}

// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim) {
    static auto typed_handle = create_unique_consecutive_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, return_inverse, return_counts, dim);
}

// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim_consecutive::schema> create_unique_dim_consecutive_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unique_dim_consecutive::name, unique_dim_consecutive::overload_name).typed<unique_dim_consecutive::schema>();
}

// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive::call(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_unique_dim_consecutive_typed_handle();
    return typed_handle.call(self, dim, return_inverse, return_counts);
}

// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
    static auto typed_handle = create_unique_dim_consecutive_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, return_inverse, return_counts);
}

// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_unique2::schema> create__unique2_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_unique2::name, _unique2::overload_name).typed<_unique2::schema>();
}

// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2::call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__unique2_typed_handle();
    return typed_handle.call(self, sorted, return_inverse, return_counts);
}

// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
    static auto typed_handle = create__unique2_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts);
}

// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_view::schema> create__unsafe_view_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_unsafe_view::name, _unsafe_view::overload_name).typed<_unsafe_view::schema>();
}

// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
at::Tensor _unsafe_view::call(const at::Tensor & self, c10::SymIntArrayRef size) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create__unsafe_view_typed_handle();
    return typed_handle.call(self, size);
}

// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
at::Tensor _unsafe_view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
    static auto typed_handle = create__unsafe_view_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, size);
}

// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze::schema> create_unsqueeze_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unsqueeze::name, unsqueeze::overload_name).typed<unsqueeze::schema>();
}

// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor unsqueeze::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_unsqueeze_typed_handle();
    return typed_handle.call(self, dim);
}

// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
at::Tensor unsqueeze::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    static auto typed_handle = create_unsqueeze_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze_::schema> create_unsqueeze__typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unsqueeze_::name, unsqueeze_::overload_name).typed<unsqueeze_::schema>();
}

// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & unsqueeze_::call(at::Tensor & self, int64_t dim) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_unsqueeze__typed_handle();
    return typed_handle.call(self, dim);
}

// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
at::Tensor & unsqueeze_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {
    static auto typed_handle = create_unsqueeze__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<vander::schema> create_vander_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(vander::name, vander::overload_name).typed<vander::schema>();
}

// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
at::Tensor vander::call(const at::Tensor & x, ::std::optional<int64_t> N, bool increasing) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_vander_typed_handle();
    return typed_handle.call(x, N, increasing);
}

// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
at::Tensor vander::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, ::std::optional<int64_t> N, bool increasing) {
    static auto typed_handle = create_vander_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, x, N, increasing);
}

// aten::var(Tensor self, bool unbiased=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<var::schema> create_var_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var::name, var::overload_name).typed<var::schema>();
}

// aten::var(Tensor self, bool unbiased=True) -> Tensor
at::Tensor var::call(const at::Tensor & self, bool unbiased) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_var_typed_handle();
    return typed_handle.call(self, unbiased);
}

// aten::var(Tensor self, bool unbiased=True) -> Tensor
at::Tensor var::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
    static auto typed_handle = create_var_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, unbiased);
}

// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<var_dim::schema> create_var_dim_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_dim::name, var_dim::overload_name).typed<var_dim::schema>();
}

// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_var_dim_typed_handle();
    return typed_handle.call(self, dim, unbiased, keepdim);
}

// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
    static auto typed_handle = create_var_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<var_correction::schema> create_var_correction_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction::name, var_correction::overload_name).typed<var_correction::schema>();
}

// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_var_correction_typed_handle();
    return typed_handle.call(self, dim, correction, keepdim);
}

// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
    static auto typed_handle = create_var_correction_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<var_out::schema> create_var_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its C++ signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_out::name, var_out::overload_name).typed<var_out::schema>();
}

// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent call.
    static auto typed_handle = create_var_out_typed_handle();
    return typed_handle.call(self, dim, unbiased, keepdim, out);
}

// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_var_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
}

// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction_out::schema> create_var_correction_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction_out::name, var_correction_out::overload_name).typed<var_correction_out::schema>();
}

// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
  static auto handle = create_var_correction_out_typed_handle();
  return handle.call(self, dim, correction, keepdim, out);
}

// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
  static auto handle = create_var_correction_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
}

// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_names_dim::schema> create_var_names_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_names_dim::name, var_names_dim::overload_name).typed<var_names_dim::schema>();
}

// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  static auto handle = create_var_names_dim_typed_handle();
  return handle.call(self, dim, unbiased, keepdim);
}

// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
at::Tensor var_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  static auto handle = create_var_names_dim_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_names_out::schema> create_var_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_names_out::name, var_names_out::overload_name).typed<var_names_out::schema>();
}

// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_names_out::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
  static auto handle = create_var_names_out_typed_handle();
  return handle.call(self, dim, unbiased, keepdim, out);
}

// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
  static auto handle = create_var_names_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
}

// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction_names::schema> create_var_correction_names_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction_names::name, var_correction_names::overload_name).typed<var_correction_names::schema>();
}

// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction_names::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
  static auto handle = create_var_correction_names_typed_handle();
  return handle.call(self, dim, correction, keepdim);
}

// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
at::Tensor var_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
  static auto handle = create_var_correction_names_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_correction_names_out::schema> create_var_correction_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_correction_names_out::name, var_correction_names_out::overload_name).typed<var_correction_names_out::schema>();
}

// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_names_out::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
  static auto handle = create_var_correction_names_out_typed_handle();
  return handle.call(self, dim, correction, keepdim, out);
}

// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & var_correction_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
  static auto handle = create_var_correction_names_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
}

// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean::schema> create_var_mean_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean::name, var_mean::overload_name).typed<var_mean::schema>();
}

// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean::call(const at::Tensor & self, bool unbiased) {
  static auto handle = create_var_mean_typed_handle();
  return handle.call(self, unbiased);
}

// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased) {
  static auto handle = create_var_mean_typed_handle();
  return handle.redispatch(dispatchKeySet, self, unbiased);
}

// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_dim::schema> create_var_mean_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_dim::name, var_mean_dim::overload_name).typed<var_mean_dim::schema>();
}

// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_dim::call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  static auto handle = create_var_mean_dim_typed_handle();
  return handle.call(self, dim, unbiased, keepdim);
}

// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
  static auto handle = create_var_mean_dim_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_correction::schema> create_var_mean_correction_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_correction::name, var_mean_correction::overload_name).typed<var_mean_correction::schema>();
}

// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
  static auto handle = create_var_mean_correction_typed_handle();
  return handle.call(self, dim, correction, keepdim);
}

// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
  static auto handle = create_var_mean_correction_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_names_dim::schema> create_var_mean_names_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_names_dim::name, var_mean_names_dim::overload_name).typed<var_mean_names_dim::schema>();
}

// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim::call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  static auto handle = create_var_mean_names_dim_typed_handle();
  return handle.call(self, dim, unbiased, keepdim);
}

// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
  static auto handle = create_var_mean_names_dim_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
}

// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_correction_names::schema> create_var_mean_correction_names_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(var_mean_correction_names::name, var_mean_correction_names::overload_name).typed<var_mean_correction_names::schema>();
}

// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names::call(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
  static auto handle = create_var_mean_correction_names_typed_handle();
  return handle.call(self, dim, correction, keepdim);
}

// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
  static auto handle = create_var_mean_correction_names_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, correction, keepdim);
}

// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<view_as::schema> create_view_as_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(view_as::name, view_as::overload_name).typed<view_as::schema>();
}

// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor view_as::call(const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_view_as_typed_handle();
  return handle.call(self, other);
}

// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
at::Tensor view_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_view_as_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<where_self::schema> create_where_self_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(where_self::name, where_self::overload_name).typed<where_self::schema>();
}

// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
at::Tensor where_self::call(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_where_self_typed_handle();
  return handle.call(condition, self, other);
}

// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
at::Tensor where_self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_where_self_typed_handle();
  return handle.redispatch(dispatchKeySet, condition, self, other);
}

// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<where_self_out::schema> create_where_self_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(where_self_out::name, where_self_out::overload_name).typed<where_self_out::schema>();
}

// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & where_self_out::call(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_where_self_out_typed_handle();
  return handle.call(condition, self, other, out);
}

// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & where_self_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_where_self_out_typed_handle();
  return handle.redispatch(dispatchKeySet, condition, self, other, out);
}

// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<where_ScalarSelf::schema> create_where_ScalarSelf_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(where_ScalarSelf::name, where_ScalarSelf::overload_name).typed<where_ScalarSelf::schema>();
}

// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
at::Tensor where_ScalarSelf::call(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
  static auto handle = create_where_ScalarSelf_typed_handle();
  return handle.call(condition, self, other);
}

// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
at::Tensor where_ScalarSelf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
  static auto handle = create_where_ScalarSelf_typed_handle();
  return handle.redispatch(dispatchKeySet, condition, self, other);
}

// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<where_ScalarOther::schema> create_where_ScalarOther_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(where_ScalarOther::name, where_ScalarOther::overload_name).typed<where_ScalarOther::schema>();
}

// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
at::Tensor where_ScalarOther::call(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_where_ScalarOther_typed_handle();
  return handle.call(condition, self, other);
}

// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
at::Tensor where_ScalarOther::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_where_ScalarOther_typed_handle();
  return handle.redispatch(dispatchKeySet, condition, self, other);
}

// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<where_Scalar::schema> create_where_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(where_Scalar::name, where_Scalar::overload_name).typed<where_Scalar::schema>();
}

// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
at::Tensor where_Scalar::call(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
  static auto handle = create_where_Scalar_typed_handle();
  return handle.call(condition, self, other);
}

// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
at::Tensor where_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
  static auto handle = create_where_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, condition, self, other);
}

// aten::where(Tensor condition) -> Tensor[]
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<where::schema> create_where_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(where::name, where::overload_name).typed<where::schema>();
}

// aten::where(Tensor condition) -> Tensor[]
::std::vector<at::Tensor> where::call(const at::Tensor & condition) {
  static auto handle = create_where_typed_handle();
  return handle.call(condition);
}

// aten::where(Tensor condition) -> Tensor[]
::std::vector<at::Tensor> where::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition) {
  static auto handle = create_where_typed_handle();
  return handle.redispatch(dispatchKeySet, condition);
}

// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<norm_except_dim::schema> create_norm_except_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_except_dim::name, norm_except_dim::overload_name).typed<norm_except_dim::schema>();
}

// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
at::Tensor norm_except_dim::call(const at::Tensor & v, int64_t pow, int64_t dim) {
  static auto handle = create_norm_except_dim_typed_handle();
  return handle.call(v, pow, dim);
}

// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
at::Tensor norm_except_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, int64_t pow, int64_t dim) {
  static auto handle = create_norm_except_dim_typed_handle();
  return handle.redispatch(dispatchKeySet, v, pow, dim);
}

// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm::schema> create__weight_norm_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_weight_norm::name, _weight_norm::overload_name).typed<_weight_norm::schema>();
}

// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
at::Tensor _weight_norm::call(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  static auto handle = create__weight_norm_typed_handle();
  return handle.call(v, g, dim);
}

// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
at::Tensor _weight_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  static auto handle = create__weight_norm_typed_handle();
  return handle.redispatch(dispatchKeySet, v, g, dim);
}

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface::schema> create__weight_norm_interface_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_weight_norm_interface::name, _weight_norm_interface::overload_name).typed<_weight_norm_interface::schema>();
}

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface::call(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  static auto handle = create__weight_norm_interface_typed_handle();
  return handle.call(v, g, dim);
}

// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim) {
  static auto handle = create__weight_norm_interface_typed_handle();
  return handle.redispatch(dispatchKeySet, v, g, dim);
}

// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface_backward::schema> create__weight_norm_interface_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_weight_norm_interface_backward::name, _weight_norm_interface_backward::overload_name).typed<_weight_norm_interface_backward::schema>();
}

// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward::call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  static auto handle = create__weight_norm_interface_backward_typed_handle();
  return handle.call(grad_w, saved_v, saved_g, saved_norms, dim);
}

// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  static auto handle = create__weight_norm_interface_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim);
}

// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_differentiable_backward::schema> create__weight_norm_differentiable_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_weight_norm_differentiable_backward::name, _weight_norm_differentiable_backward::overload_name).typed<_weight_norm_differentiable_backward::schema>();
}

// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward::call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  static auto handle = create__weight_norm_differentiable_backward_typed_handle();
  return handle.call(grad_w, saved_v, saved_g, saved_norms, dim);
}

// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
  static auto handle = create__weight_norm_differentiable_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim);
}

// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<zeros_names::schema> create_zeros_names_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(zeros_names::name, zeros_names::overload_name).typed<zeros_names::schema>();
}

// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros_names::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_zeros_names_typed_handle();
  return handle.call(size, names, dtype, layout, device, pin_memory);
}

// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros_names::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_zeros_names_typed_handle();
  return handle.redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
}

// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<_efficientzerotensor::schema> create__efficientzerotensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_efficientzerotensor::name, _efficientzerotensor::overload_name).typed<_efficientzerotensor::schema>();
}

// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _efficientzerotensor::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create__efficientzerotensor_typed_handle();
  return handle.call(size, dtype, layout, device, pin_memory);
}

// aten::_efficientzerotensor(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _efficientzerotensor::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create__efficientzerotensor_typed_handle();
  return handle.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<zeros::schema> create_zeros_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(zeros::name, zeros::overload_name).typed<zeros::schema>();
}

// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros::call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_zeros_typed_handle();
  return handle.call(size, dtype, layout, device, pin_memory);
}

// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor zeros::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_zeros_typed_handle();
  return handle.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<zeros_out::schema> create_zeros_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(zeros_out::name, zeros_out::overload_name).typed<zeros_out::schema>();
}

// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
  static auto handle = create_zeros_out_typed_handle();
  return handle.call(size, out);
}

// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
  static auto handle = create_zeros_out_typed_handle();
  return handle.redispatch(dispatchKeySet, size, out);
}

// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
// Resolves the operator schema from the dispatcher and returns a typed handle; callers cache it.
static C10_NOINLINE c10::TypedOperatorHandle<zeros_like::schema> create_zeros_like_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(zeros_like::name, zeros_like::overload_name).typed<zeros_like::schema>();
}

// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor zeros_like::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  static auto handle = create_zeros_like_typed_handle();
  return handle.call(self, dtype, layout, device, pin_memory, memory_format);
}

// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
at::Tensor zeros_like::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
  static auto handle = create_zeros_like_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
}

// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma_grad::schema> create__standard_gamma_grad_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_standard_gamma_grad::name, _standard_gamma_grad::overload_name).typed<_standard_gamma_grad::schema>();
}

// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
at::Tensor _standard_gamma_grad::call(const at::Tensor & self, const at::Tensor & output) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__standard_gamma_grad_typed_handle();
    return handle.call(self, output);
}

// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
at::Tensor _standard_gamma_grad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__standard_gamma_grad_typed_handle();
    return handle.redispatch(dispatchKeySet, self, output);
}

// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma::schema> create__standard_gamma_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_standard_gamma::name, _standard_gamma::overload_name).typed<_standard_gamma::schema>();
}

// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _standard_gamma::call(const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__standard_gamma_typed_handle();
    return handle.call(self, generator);
}

// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _standard_gamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__standard_gamma_typed_handle();
    return handle.redispatch(dispatchKeySet, self, generator);
}

// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_dirichlet_grad::schema> create__dirichlet_grad_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_dirichlet_grad::name, _dirichlet_grad::overload_name).typed<_dirichlet_grad::schema>();
}

// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
at::Tensor _dirichlet_grad::call(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__dirichlet_grad_typed_handle();
    return handle.call(x, alpha, total);
}

// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
at::Tensor _dirichlet_grad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__dirichlet_grad_typed_handle();
    return handle.redispatch(dispatchKeySet, x, alpha, total);
}

// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sample_dirichlet::schema> create__sample_dirichlet_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sample_dirichlet::name, _sample_dirichlet::overload_name).typed<_sample_dirichlet::schema>();
}

// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _sample_dirichlet::call(const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sample_dirichlet_typed_handle();
    return handle.call(self, generator);
}

// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
at::Tensor _sample_dirichlet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sample_dirichlet_typed_handle();
    return handle.redispatch(dispatchKeySet, self, generator);
}

// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<poisson::schema> create_poisson_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(poisson::name, poisson::overload_name).typed<poisson::schema>();
}

// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
at::Tensor poisson::call(const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create_poisson_typed_handle();
    return handle.call(self, generator);
}

// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
at::Tensor poisson::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create_poisson_typed_handle();
    return handle.redispatch(dispatchKeySet, self, generator);
}

// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<binomial::schema> create_binomial_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(binomial::name, binomial::overload_name).typed<binomial::schema>();
}

// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
at::Tensor binomial::call(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create_binomial_typed_handle();
    return handle.call(count, prob, generator);
}

// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
at::Tensor binomial::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create_binomial_typed_handle();
    return handle.redispatch(dispatchKeySet, count, prob, generator);
}

// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_norm::schema> create_native_norm_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_norm::name, native_norm::overload_name).typed<native_norm::schema>();
}

// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
at::Tensor native_norm::call(const at::Tensor & self, const at::Scalar & p) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create_native_norm_typed_handle();
    return handle.call(self, p);
}

// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
at::Tensor native_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create_native_norm_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p);
}

// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<native_norm_ScalarOpt_dim_dtype::schema> create_native_norm_ScalarOpt_dim_dtype_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_norm_ScalarOpt_dim_dtype::name, native_norm_ScalarOpt_dim_dtype::overload_name).typed<native_norm_ScalarOpt_dim_dtype::schema>();
}

// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
at::Tensor native_norm_ScalarOpt_dim_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create_native_norm_ScalarOpt_dim_dtype_typed_handle();
    return handle.call(self, p, dim, keepdim, dtype);
}

// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
at::Tensor native_norm_ScalarOpt_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create_native_norm_ScalarOpt_dim_dtype_typed_handle();
    return handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
}

// aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_with_update::schema> create__batch_norm_with_update_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_with_update::name, _batch_norm_with_update::overload_name).typed<_batch_norm_with_update::schema>();
}

// aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__batch_norm_with_update_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_with_update(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__batch_norm_with_update_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_with_update_out::schema> create__batch_norm_with_update_out_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_with_update_out::name, _batch_norm_with_update_out::overload_name).typed<_batch_norm_with_update_out::schema>();
}

// aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__batch_norm_with_update_out_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}

// aten::_batch_norm_with_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd, Tensor(g!) reserve) -> (Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__batch_norm_with_update_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps, out, save_mean, save_invstd, reserve);
}

// aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_no_update::schema> create__batch_norm_no_update_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_batch_norm_no_update::name, _batch_norm_no_update::overload_name).typed<_batch_norm_no_update::schema>();
}

// aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_no_update::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__batch_norm_no_update_typed_handle();
    return handle.call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_no_update::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__batch_norm_no_update_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward::schema> create_batch_norm_backward_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_backward::name, batch_norm_backward::overload_name).typed<batch_norm_backward::schema>();
}

// aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create_batch_norm_backward_typed_handle();
    return handle.call(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
}

// aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create_batch_norm_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
}

// aten::_sparse_sum(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum::schema> create__sparse_sum_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum::name, _sparse_sum::overload_name).typed<_sparse_sum::schema>();
}

// aten::_sparse_sum(Tensor self) -> Tensor
at::Tensor _sparse_sum::call(const at::Tensor & self) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_sum_typed_handle();
    return handle.call(self);
}

// aten::_sparse_sum(Tensor self) -> Tensor
at::Tensor _sparse_sum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_sum_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dtype::schema> create__sparse_sum_dtype_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum_dtype::name, _sparse_sum_dtype::overload_name).typed<_sparse_sum_dtype::schema>();
}

// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dtype::call(const at::Tensor & self, at::ScalarType dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_sum_dtype_typed_handle();
    return handle.call(self, dtype);
}

// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_sum_dtype_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype);
}

// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dim::schema> create__sparse_sum_dim_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum_dim::name, _sparse_sum_dim::overload_name).typed<_sparse_sum_dim::schema>();
}

// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
at::Tensor _sparse_sum_dim::call(const at::Tensor & self, at::IntArrayRef dim) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_sum_dim_typed_handle();
    return handle.call(self, dim);
}

// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
at::Tensor _sparse_sum_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_sum_dim_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dim_dtype::schema> create__sparse_sum_dim_dtype_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum_dim_dtype::name, _sparse_sum_dim_dtype::overload_name).typed<_sparse_sum_dim_dtype::schema>();
}

// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dim_dtype::call(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_sum_dim_dtype_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
at::Tensor _sparse_sum_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_sum_dim_dtype_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_backward::schema> create__sparse_sum_backward_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sum_backward::name, _sparse_sum_backward::overload_name).typed<_sparse_sum_backward::schema>();
}

// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
at::Tensor _sparse_sum_backward::call(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_sum_backward_typed_handle();
    return handle.call(grad, self, dim);
}

// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
at::Tensor _sparse_sum_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_sum_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, self, dim);
}

// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_sum_dim_dtype::schema> create__sparse_csr_sum_dim_dtype_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_csr_sum_dim_dtype::name, _sparse_csr_sum_dim_dtype::overload_name).typed<_sparse_csr_sum_dim_dtype::schema>();
}

// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_csr_sum_dim_dtype::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_csr_sum_dim_dtype_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_csr_sum_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_csr_sum_dim_dtype_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_prod_dim_dtype::schema> create__sparse_csr_prod_dim_dtype_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_csr_prod_dim_dtype::name, _sparse_csr_prod_dim_dtype::overload_name).typed<_sparse_csr_prod_dim_dtype::schema>();
}

// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_csr_prod_dim_dtype::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_csr_prod_dim_dtype_typed_handle();
    return handle.call(self, dim, keepdim, dtype);
}

// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_csr_prod_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_csr_prod_dim_dtype_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, dtype);
}

// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_int::schema> create__sparse_softmax_int_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax_int::name, _sparse_softmax_int::overload_name).typed<_sparse_softmax_int::schema>();
}

// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_int::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_softmax_int_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_softmax_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_Dimname::schema> create__sparse_softmax_Dimname_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax_Dimname::name, _sparse_softmax_Dimname::overload_name).typed<_sparse_softmax_Dimname::schema>();
}

// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_softmax_Dimname_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_softmax_Dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax::schema> create__sparse_softmax_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax::name, _sparse_softmax::overload_name).typed<_sparse_softmax::schema>();
}

// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _sparse_softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_softmax_typed_handle();
    return handle.call(self, dim, half_to_float);
}

// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _sparse_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_softmax_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, half_to_float);
}

// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_backward_data::schema> create__sparse_softmax_backward_data_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_softmax_backward_data::name, _sparse_softmax_backward_data::overload_name).typed<_sparse_softmax_backward_data::schema>();
}

// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
at::Tensor _sparse_softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_softmax_backward_data_typed_handle();
    return handle.call(grad_output, output, dim, self);
}

// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
at::Tensor _sparse_softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_softmax_backward_data_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, output, dim, self);
}

// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_int::schema> create__sparse_log_softmax_int_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_log_softmax_int::name, _sparse_log_softmax_int::overload_name).typed<_sparse_log_softmax_int::schema>();
}

// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_log_softmax_int::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_log_softmax_int_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_log_softmax_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_log_softmax_int_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_Dimname::schema> create__sparse_log_softmax_Dimname_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_log_softmax_Dimname::name, _sparse_log_softmax_Dimname::overload_name).typed<_sparse_log_softmax_Dimname::schema>();
}

// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_log_softmax_Dimname::call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_log_softmax_Dimname_typed_handle();
    return handle.call(self, dim, dtype);
}

// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor _sparse_log_softmax_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_log_softmax_Dimname_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax::schema> create__sparse_log_softmax_typed_handle() {
  // Look up the operator schema in the global dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_log_softmax::name, _sparse_log_softmax::overload_name).typed<_sparse_log_softmax::schema>();
}

// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _sparse_log_softmax::call(const at::Tensor & self, int64_t dim, bool half_to_float) {
    // The handle is resolved once on first use and cached for later calls.
    static auto handle = create__sparse_log_softmax_typed_handle();
    return handle.call(self, dim, half_to_float);
}

// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
at::Tensor _sparse_log_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
    // Redispatch through the cached typed handle using the caller-provided key set.
    static auto handle = create__sparse_log_softmax_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, half_to_float);
}

// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_backward_data::schema> create__sparse_log_softmax_backward_data_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_log_softmax_backward_data::name, _sparse_log_softmax_backward_data::overload_name)
      .typed<_sparse_log_softmax_backward_data::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor _sparse_log_softmax_backward_data::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
    static auto typed_handle = create__sparse_log_softmax_backward_data_typed_handle();
    return typed_handle.call(grad_output, output, dim, self);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor _sparse_log_softmax_backward_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
    static auto typed_handle = create__sparse_log_softmax_backward_data_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_output, output, dim, self);
}

// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_spdiags::schema> create__spdiags_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_spdiags::name, _spdiags::overload_name)
      .typed<_spdiags::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor _spdiags::call(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout) {
    static auto typed_handle = create__spdiags_typed_handle();
    return typed_handle.call(diagonals, offsets, shape, layout);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor _spdiags::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout) {
    static auto typed_handle = create__spdiags_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, diagonals, offsets, shape, layout);
}

// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dtype::schema> create_norm_ScalarOpt_dtype_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dtype::name, norm_ScalarOpt_dtype::overload_name)
      .typed<norm_ScalarOpt_dtype::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor norm_ScalarOpt_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype) {
    static auto typed_handle = create_norm_ScalarOpt_dtype_typed_handle();
    return typed_handle.call(self, p, dtype);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor norm_ScalarOpt_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype) {
    static auto typed_handle = create_norm_ScalarOpt_dtype_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dtype);
}

// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_Scalar::schema> create_norm_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_Scalar::name, norm_Scalar::overload_name)
      .typed<norm_Scalar::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor norm_Scalar::call(const at::Tensor & self, const at::Scalar & p) {
    static auto typed_handle = create_norm_Scalar_typed_handle();
    return typed_handle.call(self, p);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor norm_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p) {
    static auto typed_handle = create_norm_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p);
}

// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dim_dtype::schema> create_norm_ScalarOpt_dim_dtype_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dim_dtype::name, norm_ScalarOpt_dim_dtype::overload_name)
      .typed<norm_ScalarOpt_dim_dtype::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor norm_ScalarOpt_dim_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
    static auto typed_handle = create_norm_ScalarOpt_dim_dtype_typed_handle();
    return typed_handle.call(self, p, dim, keepdim, dtype);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor norm_ScalarOpt_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
    static auto typed_handle = create_norm_ScalarOpt_dim_dtype_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
}

// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dim::schema> create_norm_ScalarOpt_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_ScalarOpt_dim::name, norm_ScalarOpt_dim::overload_name)
      .typed<norm_ScalarOpt_dim::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor norm_ScalarOpt_dim::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
    static auto typed_handle = create_norm_ScalarOpt_dim_typed_handle();
    return typed_handle.call(self, p, dim, keepdim);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor norm_ScalarOpt_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
    static auto typed_handle = create_norm_ScalarOpt_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim);
}

// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_dtype_out::schema> create_norm_dtype_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_dtype_out::name, norm_dtype_out::overload_name)
      .typed<norm_dtype_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & norm_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    static auto typed_handle = create_norm_dtype_out_typed_handle();
    return typed_handle.call(self, p, dim, keepdim, dtype, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & norm_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    static auto typed_handle = create_norm_dtype_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
}

// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_out::schema> create_norm_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_out::name, norm_out::overload_name)
      .typed<norm_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & norm_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_norm_out_typed_handle();
    return typed_handle.call(self, p, dim, keepdim, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_norm_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, out);
}

// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_ScalarOpt_dim_dtype::schema> create_norm_names_ScalarOpt_dim_dtype_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_ScalarOpt_dim_dtype::name, norm_names_ScalarOpt_dim_dtype::overload_name)
      .typed<norm_names_ScalarOpt_dim_dtype::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor norm_names_ScalarOpt_dim_dtype::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
    static auto typed_handle = create_norm_names_ScalarOpt_dim_dtype_typed_handle();
    return typed_handle.call(self, p, dim, keepdim, dtype);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor norm_names_ScalarOpt_dim_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
    static auto typed_handle = create_norm_names_ScalarOpt_dim_dtype_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
}

// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_ScalarOpt_dim::schema> create_norm_names_ScalarOpt_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_ScalarOpt_dim::name, norm_names_ScalarOpt_dim::overload_name)
      .typed<norm_names_ScalarOpt_dim::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor norm_names_ScalarOpt_dim::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
    static auto typed_handle = create_norm_names_ScalarOpt_dim_typed_handle();
    return typed_handle.call(self, p, dim, keepdim);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor norm_names_ScalarOpt_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
    static auto typed_handle = create_norm_names_ScalarOpt_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim);
}

// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_dtype_out::schema> create_norm_names_dtype_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_dtype_out::name, norm_names_dtype_out::overload_name)
      .typed<norm_names_dtype_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & norm_names_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    static auto typed_handle = create_norm_names_dtype_out_typed_handle();
    return typed_handle.call(self, p, dim, keepdim, dtype, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & norm_names_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
    static auto typed_handle = create_norm_names_dtype_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
}

// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<norm_names_out::schema> create_norm_names_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(norm_names_out::name, norm_names_out::overload_name)
      .typed<norm_names_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & norm_names_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_norm_names_out_typed_handle();
    return typed_handle.call(self, p, dim, keepdim, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & norm_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_norm_names_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, p, dim, keepdim, out);
}

// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<frexp_Tensor::schema> create_frexp_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frexp_Tensor::name, frexp_Tensor::overload_name)
      .typed<frexp_Tensor::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
::std::tuple<at::Tensor,at::Tensor> frexp_Tensor::call(const at::Tensor & self) {
    static auto typed_handle = create_frexp_Tensor_typed_handle();
    return typed_handle.call(self);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor> frexp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create_frexp_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<frexp_Tensor_out::schema> create_frexp_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frexp_Tensor_out::name, frexp_Tensor_out::overload_name)
      .typed<frexp_Tensor_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
::std::tuple<at::Tensor &,at::Tensor &> frexp_Tensor_out::call(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
    static auto typed_handle = create_frexp_Tensor_out_typed_handle();
    return typed_handle.call(self, mantissa, exponent);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &> frexp_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
    static auto typed_handle = create_frexp_Tensor_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, mantissa, exponent);
}

// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<frobenius_norm_dim::schema> create_frobenius_norm_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frobenius_norm_dim::name, frobenius_norm_dim::overload_name)
      .typed<frobenius_norm_dim::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor frobenius_norm_dim::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    static auto typed_handle = create_frobenius_norm_dim_typed_handle();
    return typed_handle.call(self, dim, keepdim);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor frobenius_norm_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    static auto typed_handle = create_frobenius_norm_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<frobenius_norm_out::schema> create_frobenius_norm_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(frobenius_norm_out::name, frobenius_norm_out::overload_name)
      .typed<frobenius_norm_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & frobenius_norm_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_frobenius_norm_out_typed_handle();
    return typed_handle.call(self, dim, keepdim, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & frobenius_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_frobenius_norm_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm::schema> create_nuclear_norm_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm::name, nuclear_norm::overload_name)
      .typed<nuclear_norm::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor nuclear_norm::call(const at::Tensor & self, bool keepdim) {
    static auto typed_handle = create_nuclear_norm_typed_handle();
    return typed_handle.call(self, keepdim);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor nuclear_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim) {
    static auto typed_handle = create_nuclear_norm_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, keepdim);
}

// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm_out::schema> create_nuclear_norm_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm_out::name, nuclear_norm_out::overload_name)
      .typed<nuclear_norm_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & nuclear_norm_out::call(const at::Tensor & self, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_nuclear_norm_out_typed_handle();
    return typed_handle.call(self, keepdim, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & nuclear_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_nuclear_norm_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, keepdim, out);
}

// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm_dim::schema> create_nuclear_norm_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm_dim::name, nuclear_norm_dim::overload_name)
      .typed<nuclear_norm_dim::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor nuclear_norm_dim::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    static auto typed_handle = create_nuclear_norm_dim_typed_handle();
    return typed_handle.call(self, dim, keepdim);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor nuclear_norm_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    static auto typed_handle = create_nuclear_norm_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<nuclear_norm_dim_out::schema> create_nuclear_norm_dim_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(nuclear_norm_dim_out::name, nuclear_norm_dim_out::overload_name)
      .typed<nuclear_norm_dim_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & nuclear_norm_dim_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_nuclear_norm_dim_out_typed_handle();
    return typed_handle.call(self, dim, keepdim, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & nuclear_norm_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    static auto typed_handle = create_nuclear_norm_dim_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<clone::schema> create_clone_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(clone::name, clone::overload_name)
      .typed<clone::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor clone::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
    static auto typed_handle = create_clone_typed_handle();
    return typed_handle.call(self, memory_format);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor clone::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
    static auto typed_handle = create_clone_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, memory_format);
}

// aten::positive(Tensor(a) self) -> Tensor(a)
// aten::positive(Tensor(a) self) -> Tensor(a)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<positive::schema> create_positive_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(positive::name, positive::overload_name)
      .typed<positive::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor positive::call(const at::Tensor & self) {
    static auto typed_handle = create_positive_typed_handle();
    return typed_handle.call(self);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor positive::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create_positive_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_::schema> create_resize_as__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(resize_as_::name, resize_as_::overload_name)
      .typed<resize_as_::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
const at::Tensor & resize_as_::call(const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format) {
    static auto typed_handle = create_resize_as__typed_handle();
    return typed_handle.call(self, the_template, memory_format);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
const at::Tensor & resize_as_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format) {
    static auto typed_handle = create_resize_as__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, the_template, memory_format);
}

// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_sparse_::schema> create_resize_as_sparse__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(resize_as_sparse_::name, resize_as_sparse_::overload_name)
      .typed<resize_as_sparse_::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
const at::Tensor & resize_as_sparse_::call(const at::Tensor & self, const at::Tensor & the_template) {
    static auto typed_handle = create_resize_as_sparse__typed_handle();
    return typed_handle.call(self, the_template);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
const at::Tensor & resize_as_sparse_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) {
    static auto typed_handle = create_resize_as_sparse__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, the_template);
}

// aten::zero_(Tensor(a!) self) -> Tensor(a!)
// aten::zero_(Tensor(a!) self) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<zero_::schema> create_zero__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(zero_::name, zero_::overload_name)
      .typed<zero_::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & zero_::call(at::Tensor & self) {
    static auto typed_handle = create_zero__typed_handle();
    return typed_handle.call(self);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & zero_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    static auto typed_handle = create_zero__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<sub_out::schema> create_sub_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sub_out::name, sub_out::overload_name)
      .typed<sub_out::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & sub_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    static auto typed_handle = create_sub_out_typed_handle();
    return typed_handle.call(self, other, alpha, out);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & sub_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    static auto typed_handle = create_sub_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<sub_Tensor::schema> create_sub_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sub_Tensor::name, sub_Tensor::overload_name)
      .typed<sub_Tensor::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor sub_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub_Tensor_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor sub_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<sub__Tensor::schema> create_sub__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sub__Tensor::name, sub__Tensor::overload_name)
      .typed<sub__Tensor::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & sub__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub__Tensor_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & sub__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub__Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<sub_Scalar::schema> create_sub_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sub_Scalar::name, sub_Scalar::overload_name)
      .typed<sub_Scalar::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor sub_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub_Scalar_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor sub_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
// Typed-handle factory: resolves this overload's schema in the dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<sub__Scalar::schema> create_sub__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(sub__Scalar::name, sub__Scalar::overload_name)
      .typed<sub__Scalar::schema>();
}

// Entry point: forwards through a lazily-initialized cached handle.
at::Tensor & sub__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub__Scalar_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// Variant that threads an explicit DispatchKeySet to the dispatcher.
at::Tensor & sub__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    static auto typed_handle = create_sub__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// Generated dispatcher stubs for the aten::subtract family (out / Tensor /
// in-place Tensor / Scalar / in-place Scalar). Each operator pairs a
// C10_NOINLINE schema-lookup helper (resolved from the c10::Dispatcher
// singleton) with call()/redispatch() wrappers that cache the typed handle in
// a function-local static (thread-safe one-time init) before dispatching.
// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<subtract_out::schema> create_subtract_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract_out::name, subtract_out::overload_name)
      .typed<subtract_out::schema>();
}

// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & subtract_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_subtract_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & subtract_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_subtract_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<subtract_Tensor::schema> create_subtract_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract_Tensor::name, subtract_Tensor::overload_name)
      .typed<subtract_Tensor::schema>();
}

// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor subtract_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor subtract_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<subtract__Tensor::schema> create_subtract__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract__Tensor::name, subtract__Tensor::overload_name)
      .typed<subtract__Tensor::schema>();
}

// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & subtract__Tensor::call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract__Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & subtract__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<subtract_Scalar::schema> create_subtract_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract_Scalar::name, subtract_Scalar::overload_name)
      .typed<subtract_Scalar::schema>();
}

// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor subtract_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract_Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor subtract_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<subtract__Scalar::schema> create_subtract__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(subtract__Scalar::name, subtract__Scalar::overload_name)
      .typed<subtract__Scalar::schema>();
}

// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & subtract__Scalar::call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract__Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
at::Tensor & subtract__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_subtract__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// Generated dispatcher stubs for aten::rsub.Tensor: a C10_NOINLINE
// schema-lookup helper (resolved from the c10::Dispatcher singleton) plus
// call()/redispatch() wrappers that cache the typed handle in a
// function-local static (thread-safe one-time init) before dispatching.
// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rsub_Tensor::schema> create_rsub_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsub_Tensor::name, rsub_Tensor::overload_name)
      .typed<rsub_Tensor::schema>();
}

// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor rsub_Tensor::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_rsub_Tensor_typed_handle();
    return op.call(self, other, alpha);
}

// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
at::Tensor rsub_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    
    static auto op = create_rsub_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// Generated dispatcher stubs for the aten::heaviside family (out / functional
// / in-place). Each operator pairs a C10_NOINLINE schema-lookup helper
// (resolved from the c10::Dispatcher singleton) with call()/redispatch()
// wrappers that cache the typed handle in a function-local static
// (thread-safe one-time init) before dispatching.
// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<heaviside_out::schema> create_heaviside_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(heaviside_out::name, heaviside_out::overload_name)
      .typed<heaviside_out::schema>();
}

// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & heaviside_out::call(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
    
    static auto op = create_heaviside_out_typed_handle();
    return op.call(self, values, out);
}

// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & heaviside_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
    
    static auto op = create_heaviside_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, values, out);
}

// aten::heaviside(Tensor self, Tensor values) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<heaviside::schema> create_heaviside_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(heaviside::name, heaviside::overload_name)
      .typed<heaviside::schema>();
}

// aten::heaviside(Tensor self, Tensor values) -> Tensor
at::Tensor heaviside::call(const at::Tensor & self, const at::Tensor & values) {
    
    static auto op = create_heaviside_typed_handle();
    return op.call(self, values);
}

// aten::heaviside(Tensor self, Tensor values) -> Tensor
at::Tensor heaviside::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values) {
    
    static auto op = create_heaviside_typed_handle();
    return op.redispatch(dispatchKeySet, self, values);
}

// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<heaviside_::schema> create_heaviside__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(heaviside_::name, heaviside_::overload_name)
      .typed<heaviside_::schema>();
}

// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
at::Tensor & heaviside_::call(at::Tensor & self, const at::Tensor & values) {
    
    static auto op = create_heaviside__typed_handle();
    return op.call(self, values);
}

// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
at::Tensor & heaviside_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values) {
    
    static auto op = create_heaviside__typed_handle();
    return op.redispatch(dispatchKeySet, self, values);
}

// Generated dispatcher stubs for aten::rsub.Scalar: a C10_NOINLINE
// schema-lookup helper (resolved from the c10::Dispatcher singleton) plus
// call()/redispatch() wrappers that cache the typed handle in a
// function-local static (thread-safe one-time init) before dispatching.
// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rsub_Scalar::schema> create_rsub_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsub_Scalar::name, rsub_Scalar::overload_name)
      .typed<rsub_Scalar::schema>();
}

// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor rsub_Scalar::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_rsub_Scalar_typed_handle();
    return op.call(self, other, alpha);
}

// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
at::Tensor rsub_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
    
    static auto op = create_rsub_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// Generated dispatcher stubs for aten::_sparse_addmm and the
// aten::sparse_sampled_addmm out/functional pair. Each operator pairs a
// C10_NOINLINE schema-lookup helper (resolved from the c10::Dispatcher
// singleton) with call()/redispatch() wrappers that cache the typed handle in
// a function-local static (thread-safe one-time init) before dispatching.
// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_addmm::schema> create__sparse_addmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_addmm::name, _sparse_addmm::overload_name)
      .typed<_sparse_addmm::schema>();
}

// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor _sparse_addmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create__sparse_addmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor _sparse_addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create__sparse_addmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sparse_sampled_addmm_out::schema> create_sparse_sampled_addmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_sampled_addmm_out::name, sparse_sampled_addmm_out::overload_name)
      .typed<sparse_sampled_addmm_out::schema>();
}

// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_sampled_addmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_sparse_sampled_addmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sparse_sampled_addmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_sparse_sampled_addmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_sampled_addmm::schema> create_sparse_sampled_addmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_sampled_addmm::name, sparse_sampled_addmm::overload_name)
      .typed<sparse_sampled_addmm::schema>();
}

// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sparse_sampled_addmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_sparse_sampled_addmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor sparse_sampled_addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_sparse_sampled_addmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

// Generated dispatcher stubs for aten::_sparse_mm_reduce_impl and its
// backward. Each operator pairs a C10_NOINLINE schema-lookup helper (resolved
// from the c10::Dispatcher singleton) with call()/redispatch() wrappers that
// cache the typed handle in a function-local static (thread-safe one-time
// init) before dispatching. Both ops return a (Tensor, Tensor) tuple.
// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm_reduce_impl::schema> create__sparse_mm_reduce_impl_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm_reduce_impl::name, _sparse_mm_reduce_impl::overload_name)
      .typed<_sparse_mm_reduce_impl::schema>();
}

// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl::call(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
    
    static auto op = create__sparse_mm_reduce_impl_typed_handle();
    return op.call(self, other, reduce);
}

// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
    
    static auto op = create__sparse_mm_reduce_impl_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, reduce);
}

// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mm_reduce_impl_backward::schema> create__sparse_mm_reduce_impl_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mm_reduce_impl_backward::name, _sparse_mm_reduce_impl_backward::overload_name)
      .typed<_sparse_mm_reduce_impl_backward::schema>();
}

// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward::call(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
    
    static auto op = create__sparse_mm_reduce_impl_backward_typed_handle();
    return op.call(self, grad_out, weight, reduce, arg_out, output_mask);
}

// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
    
    static auto op = create__sparse_mm_reduce_impl_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_out, weight, reduce, arg_out, output_mask);
}

// Generated dispatcher stubs for the aten::addmm family (out / functional /
// in-place). Each operator pairs a C10_NOINLINE schema-lookup helper
// (resolved from the c10::Dispatcher singleton) with call()/redispatch()
// wrappers that cache the typed handle in a function-local static
// (thread-safe one-time init) before dispatching.
// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addmm_out::schema> create_addmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmm_out::name, addmm_out::overload_name)
      .typed<addmm_out::schema>();
}

// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & addmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addmm::schema> create_addmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmm::name, addmm::overload_name)
      .typed<addmm::schema>();
}

// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addmm::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmm_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
at::Tensor addmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addmm_::schema> create_addmm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addmm_::name, addmm_::overload_name)
      .typed<addmm_::schema>();
}

// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addmm_::call(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmm__typed_handle();
    return op.call(self, mat1, mat2, beta, alpha);
}

// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
at::Tensor & addmm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addmm__typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
}

// Generated dispatcher stubs for aten::_addmm_activation (out / functional).
// Each operator pairs a C10_NOINLINE schema-lookup helper (resolved from the
// c10::Dispatcher singleton) with call()/redispatch() wrappers that cache the
// typed handle in a function-local static (thread-safe one-time init) before
// dispatching.
// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_addmm_activation_out::schema> create__addmm_activation_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_addmm_activation_out::name, _addmm_activation_out::overload_name)
      .typed<_addmm_activation_out::schema>();
}

// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _addmm_activation_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
    
    static auto op = create__addmm_activation_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, use_gelu, out);
}

// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _addmm_activation_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
    
    static auto op = create__addmm_activation_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu, out);
}

// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_addmm_activation::schema> create__addmm_activation_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_addmm_activation::name, _addmm_activation::overload_name)
      .typed<_addmm_activation::schema>();
}

// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
at::Tensor _addmm_activation::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
    
    static auto op = create__addmm_activation_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, use_gelu);
}

// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
at::Tensor _addmm_activation::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
    
    static auto op = create__addmm_activation_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu);
}

// Generated dispatcher stubs for aten::_scaled_mm (functional / out) and
// aten::_scaled_grouped_mm. Each operator pairs a C10_NOINLINE schema-lookup
// helper (resolved from the c10::Dispatcher singleton) with
// call()/redispatch() wrappers that cache the typed handle in a
// function-local static (thread-safe one-time init) before dispatching.
// aten::_scaled_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_mm::schema> create__scaled_mm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_mm::name, _scaled_mm::overload_name)
      .typed<_scaled_mm::schema>();
}

// aten::_scaled_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor
at::Tensor _scaled_mm::call(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
    
    static auto op = create__scaled_mm_typed_handle();
    return op.call(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum);
}

// aten::_scaled_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor
at::Tensor _scaled_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
    
    static auto op = create__scaled_mm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum);
}

// aten::_scaled_mm.out(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_mm_out::schema> create__scaled_mm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_mm_out::name, _scaled_mm_out::overload_name)
      .typed<_scaled_mm_out::schema>();
}

// aten::_scaled_mm.out(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _scaled_mm_out::call(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum, at::Tensor & out) {
    
    static auto op = create__scaled_mm_out_typed_handle();
    return op.call(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum, out);
}

// aten::_scaled_mm.out(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _scaled_mm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum, at::Tensor & out) {
    
    static auto op = create__scaled_mm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum, out);
}

// aten::_scaled_grouped_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? offs=None, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_grouped_mm::schema> create__scaled_grouped_mm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_grouped_mm::name, _scaled_grouped_mm::overload_name)
      .typed<_scaled_grouped_mm::schema>();
}

// aten::_scaled_grouped_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? offs=None, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor
at::Tensor _scaled_grouped_mm::call(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & offs, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
    
    static auto op = create__scaled_grouped_mm_typed_handle();
    return op.call(self, mat2, scale_a, scale_b, offs, bias, scale_result, out_dtype, use_fast_accum);
}

// aten::_scaled_grouped_mm(Tensor self, Tensor mat2, Tensor scale_a, Tensor scale_b, Tensor? offs=None, Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, bool use_fast_accum=False) -> Tensor
at::Tensor _scaled_grouped_mm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & offs, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
    
    static auto op = create__scaled_grouped_mm_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat2, scale_a, scale_b, offs, bias, scale_result, out_dtype, use_fast_accum);
}

// Generated dispatcher stubs for aten::_sparse_compressed_tensor_with_dims:
// a C10_NOINLINE schema-lookup helper (resolved from the c10::Dispatcher
// singleton) plus call()/redispatch() wrappers that cache the typed handle in
// a function-local static (thread-safe one-time init) before dispatching.
// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_compressed_tensor_with_dims::schema> create__sparse_compressed_tensor_with_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_compressed_tensor_with_dims::name, _sparse_compressed_tensor_with_dims::overload_name)
      .typed<_sparse_compressed_tensor_with_dims::schema>();
}

// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _sparse_compressed_tensor_with_dims::call(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_compressed_tensor_with_dims_typed_handle();
    return op.call(nnz, dense_dim, size, blocksize, index_dtype, dtype, layout, device, pin_memory);
}

// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor _sparse_compressed_tensor_with_dims::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_compressed_tensor_with_dims_typed_handle();
    return op.redispatch(dispatchKeySet, nnz, dense_dim, size, blocksize, index_dtype, dtype, layout, device, pin_memory);
}

// Generated dispatcher stubs for
// aten::sparse_compressed_tensor.comp_plain_value_size: a C10_NOINLINE
// schema-lookup helper (resolved from the c10::Dispatcher singleton) plus a
// call() wrapper that caches the typed handle in a function-local static
// (thread-safe one-time init) before dispatching.
// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sparse_compressed_tensor_comp_plain_value_size::schema> create_sparse_compressed_tensor_comp_plain_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_compressed_tensor_comp_plain_value_size::name, sparse_compressed_tensor_comp_plain_value_size::overload_name)
      .typed<sparse_compressed_tensor_comp_plain_value_size::schema>();
}

// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_compressed_tensor_comp_plain_value_size::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_compressed_tensor_comp_plain_value_size_typed_handle();
    return op.call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
at::Tensor sparse_compressed_tensor_comp_plain_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_compressed_tensor_comp_plain_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csr_tensor_crow_col_value_size::schema> create_sparse_csr_tensor_crow_col_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csr_tensor_crow_col_value_size::name, sparse_csr_tensor_crow_col_value_size::overload_name)
      .typed<sparse_csr_tensor_crow_col_value_size::schema>();
}

// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_csr_tensor_crow_col_value_size::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_csr_tensor_crow_col_value_size_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_csr_tensor_crow_col_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csr_tensor_crow_col_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csc_tensor_ccol_row_value_size::schema> create_sparse_csc_tensor_ccol_row_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csc_tensor_ccol_row_value_size::name, sparse_csc_tensor_ccol_row_value_size::overload_name)
      .typed<sparse_csc_tensor_ccol_row_value_size::schema>();
}

// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_csc_tensor_ccol_row_value_size::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_csc_tensor_ccol_row_value_size_typed_handle();
    return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_csc_tensor_ccol_row_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csc_tensor_ccol_row_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsr_tensor_crow_col_value_size::schema> create_sparse_bsr_tensor_crow_col_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsr_tensor_crow_col_value_size::name, sparse_bsr_tensor_crow_col_value_size::overload_name)
      .typed<sparse_bsr_tensor_crow_col_value_size::schema>();
}

// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_bsr_tensor_crow_col_value_size::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_bsr_tensor_crow_col_value_size_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_bsr_tensor_crow_col_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_bsr_tensor_crow_col_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsc_tensor_ccol_row_value_size::schema> create_sparse_bsc_tensor_ccol_row_value_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsc_tensor_ccol_row_value_size::name, sparse_bsc_tensor_ccol_row_value_size::overload_name)
      .typed<sparse_bsc_tensor_ccol_row_value_size::schema>();
}

// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_bsc_tensor_ccol_row_value_size::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_bsc_tensor_ccol_row_value_size_typed_handle();
    return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_bsc_tensor_ccol_row_value_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_bsc_tensor_ccol_row_value_size_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_compressed_tensor_comp_plain_value::schema> create_sparse_compressed_tensor_comp_plain_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_compressed_tensor_comp_plain_value::name, sparse_compressed_tensor_comp_plain_value::overload_name)
      .typed<sparse_compressed_tensor_comp_plain_value::schema>();
}

// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_compressed_tensor_comp_plain_value::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_compressed_tensor_comp_plain_value_typed_handle();
    return op.call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_compressed_tensor_comp_plain_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_compressed_tensor_comp_plain_value_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csr_tensor_crow_col_value::schema> create_sparse_csr_tensor_crow_col_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csr_tensor_crow_col_value::name, sparse_csr_tensor_crow_col_value::overload_name)
      .typed<sparse_csr_tensor_crow_col_value::schema>();
}

// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_csr_tensor_crow_col_value::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_csr_tensor_crow_col_value_typed_handle();
    return op.call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_csr_tensor_crow_col_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csr_tensor_crow_col_value_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_csc_tensor_ccol_row_value::schema> create_sparse_csc_tensor_ccol_row_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_csc_tensor_ccol_row_value::name, sparse_csc_tensor_ccol_row_value::overload_name)
      .typed<sparse_csc_tensor_ccol_row_value::schema>();
}

// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_csc_tensor_ccol_row_value::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_csc_tensor_ccol_row_value_typed_handle();
    return op.call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_csc_tensor_ccol_row_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_csc_tensor_ccol_row_value_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsr_tensor_crow_col_value::schema> create_sparse_bsr_tensor_crow_col_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsr_tensor_crow_col_value::name, sparse_bsr_tensor_crow_col_value::overload_name)
      .typed<sparse_bsr_tensor_crow_col_value::schema>();
}

// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_bsr_tensor_crow_col_value::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_bsr_tensor_crow_col_value_typed_handle();
    return op.call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_bsr_tensor_crow_col_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_bsr_tensor_crow_col_value_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_bsc_tensor_ccol_row_value::schema> create_sparse_bsc_tensor_ccol_row_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_bsc_tensor_ccol_row_value::name, sparse_bsc_tensor_ccol_row_value::overload_name)
      .typed<sparse_bsc_tensor_ccol_row_value::schema>();
}

// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_bsc_tensor_ccol_row_value::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_bsc_tensor_ccol_row_value_typed_handle();
    return op.call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_bsc_tensor_ccol_row_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_bsc_tensor_ccol_row_value_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
}

// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_compressed_tensor_unsafe::schema> create__sparse_compressed_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_compressed_tensor_unsafe::name, _sparse_compressed_tensor_unsafe::overload_name)
      .typed<_sparse_compressed_tensor_unsafe::schema>();
}

// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor _sparse_compressed_tensor_unsafe::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create__sparse_compressed_tensor_unsafe_typed_handle();
    return op.call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor _sparse_compressed_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_compressed_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_tensor_unsafe::schema> create__sparse_csr_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_csr_tensor_unsafe::name, _sparse_csr_tensor_unsafe::overload_name)
      .typed<_sparse_csr_tensor_unsafe::schema>();
}

// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor _sparse_csr_tensor_unsafe::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create__sparse_csr_tensor_unsafe_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor _sparse_csr_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_csr_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csc_tensor_unsafe::schema> create__sparse_csc_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_csc_tensor_unsafe::name, _sparse_csc_tensor_unsafe::overload_name)
      .typed<_sparse_csc_tensor_unsafe::schema>();
}

// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor _sparse_csc_tensor_unsafe::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create__sparse_csc_tensor_unsafe_typed_handle();
    return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor _sparse_csc_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_csc_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_bsr_tensor_unsafe::schema> create__sparse_bsr_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_bsr_tensor_unsafe::name, _sparse_bsr_tensor_unsafe::overload_name)
      .typed<_sparse_bsr_tensor_unsafe::schema>();
}

// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor _sparse_bsr_tensor_unsafe::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create__sparse_bsr_tensor_unsafe_typed_handle();
    return op.call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor _sparse_bsr_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_bsr_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_bsc_tensor_unsafe::schema> create__sparse_bsc_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_bsc_tensor_unsafe::name, _sparse_bsc_tensor_unsafe::overload_name)
      .typed<_sparse_bsc_tensor_unsafe::schema>();
}

// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor _sparse_bsc_tensor_unsafe::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create__sparse_bsc_tensor_unsafe_typed_handle();
    return op.call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor _sparse_bsc_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_bsc_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
}

// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_size::schema> create_sparse_coo_tensor_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_size::name, sparse_coo_tensor_size::overload_name)
      .typed<sparse_coo_tensor_size::schema>();
}

// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_coo_tensor_size::call(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_coo_tensor_size_typed_handle();
    return op.call(size, dtype, layout, device, pin_memory);
}

// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_coo_tensor_size::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_sparse_coo_tensor_size_typed_handle();
    return op.redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
}

// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Looks up the op in the dispatcher and returns a schema-typed handle; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_indices::schema> create_sparse_coo_tensor_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_indices::name, sparse_coo_tensor_indices::overload_name)
      .typed<sparse_coo_tensor_indices::schema>();
}

// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Top-level dispatcher entry point for this operator.
at::Tensor sparse_coo_tensor_indices::call(const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    // Handle is resolved once (thread-safe static init) and cached for all later calls.
    static auto op = create_sparse_coo_tensor_indices_typed_handle();
    return op.call(indices, values, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Re-entry below the top dispatch layer with an explicit remaining dispatchKeySet.
at::Tensor sparse_coo_tensor_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create_sparse_coo_tensor_indices_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_indices_size::schema> create_sparse_coo_tensor_indices_size_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_indices_size::name, sparse_coo_tensor_indices_size::overload_name)
      .typed<sparse_coo_tensor_indices_size::schema>();
}

// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor sparse_coo_tensor_indices_size::call(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create_sparse_coo_tensor_indices_size_typed_handle();
    return op.call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor sparse_coo_tensor_indices_size::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create_sparse_coo_tensor_indices_size_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_unsafe::schema> create__sparse_coo_tensor_unsafe_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_unsafe::name, _sparse_coo_tensor_unsafe::overload_name)
      .typed<_sparse_coo_tensor_unsafe::schema>();
}

// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor _sparse_coo_tensor_unsafe::call(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create__sparse_coo_tensor_unsafe_typed_handle();
    return op.call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor _sparse_coo_tensor_unsafe::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create__sparse_coo_tensor_unsafe_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_coo_tensor_args::schema> create__validate_sparse_coo_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_coo_tensor_args::name, _validate_sparse_coo_tensor_args::overload_name)
      .typed<_validate_sparse_coo_tensor_args::schema>();
}

// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
// Entry point: dispatches through a typed handle cached in a function-local static.
void _validate_sparse_coo_tensor_args::call(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> is_coalesced) {
    
    static auto op = create__validate_sparse_coo_tensor_args_typed_handle();
    return op.call(indices, values, size, is_coalesced);
}

// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size, bool? is_coalesced=None) -> ()
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
void _validate_sparse_coo_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> is_coalesced) {
    
    static auto op = create__validate_sparse_coo_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, indices, values, size, is_coalesced);
}

// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_compressed_tensor_args::schema> create__validate_sparse_compressed_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_compressed_tensor_args::name, _validate_sparse_compressed_tensor_args::overload_name)
      .typed<_validate_sparse_compressed_tensor_args::schema>();
}

// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
// Entry point: dispatches through a typed handle cached in a function-local static.
void _validate_sparse_compressed_tensor_args::call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
    
    static auto op = create__validate_sparse_compressed_tensor_args_typed_handle();
    return op.call(compressed_indices, plain_indices, values, size, layout);
}

// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
void _validate_sparse_compressed_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
    
    static auto op = create__validate_sparse_compressed_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, layout);
}

// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_csr_tensor_args::schema> create__validate_sparse_csr_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_csr_tensor_args::name, _validate_sparse_csr_tensor_args::overload_name)
      .typed<_validate_sparse_csr_tensor_args::schema>();
}

// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
// Entry point: dispatches through a typed handle cached in a function-local static.
void _validate_sparse_csr_tensor_args::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_csr_tensor_args_typed_handle();
    return op.call(crow_indices, col_indices, values, size);
}

// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
void _validate_sparse_csr_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_csr_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size);
}

// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_csc_tensor_args::schema> create__validate_sparse_csc_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_csc_tensor_args::name, _validate_sparse_csc_tensor_args::overload_name)
      .typed<_validate_sparse_csc_tensor_args::schema>();
}

// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Entry point: dispatches through a typed handle cached in a function-local static.
void _validate_sparse_csc_tensor_args::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_csc_tensor_args_typed_handle();
    return op.call(ccol_indices, row_indices, values, size);
}

// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
void _validate_sparse_csc_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_csc_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size);
}

// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_bsr_tensor_args::schema> create__validate_sparse_bsr_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_bsr_tensor_args::name, _validate_sparse_bsr_tensor_args::overload_name)
      .typed<_validate_sparse_bsr_tensor_args::schema>();
}

// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
// Entry point: dispatches through a typed handle cached in a function-local static.
void _validate_sparse_bsr_tensor_args::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_bsr_tensor_args_typed_handle();
    return op.call(crow_indices, col_indices, values, size);
}

// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
void _validate_sparse_bsr_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_bsr_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, crow_indices, col_indices, values, size);
}

// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_validate_sparse_bsc_tensor_args::schema> create__validate_sparse_bsc_tensor_args_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_validate_sparse_bsc_tensor_args::name, _validate_sparse_bsc_tensor_args::overload_name)
      .typed<_validate_sparse_bsc_tensor_args::schema>();
}

// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Entry point: dispatches through a typed handle cached in a function-local static.
void _validate_sparse_bsc_tensor_args::call(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_bsc_tensor_args_typed_handle();
    return op.call(ccol_indices, row_indices, values, size);
}

// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
void _validate_sparse_bsc_tensor_args::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
    
    static auto op = create__validate_sparse_bsc_tensor_args_typed_handle();
    return op.redispatch(dispatchKeySet, ccol_indices, row_indices, values, size);
}

// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims::schema> create__sparse_coo_tensor_with_dims_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_with_dims::name, _sparse_coo_tensor_with_dims::overload_name)
      .typed<_sparse_coo_tensor_with_dims::schema>();
}

// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor _sparse_coo_tensor_with_dims::call(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_coo_tensor_with_dims_typed_handle();
    return op.call(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor _sparse_coo_tensor_with_dims::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create__sparse_coo_tensor_with_dims_typed_handle();
    return op.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims_and_tensors::schema> create__sparse_coo_tensor_with_dims_and_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_with_dims_and_tensors::name, _sparse_coo_tensor_with_dims_and_tensors::overload_name)
      .typed<_sparse_coo_tensor_with_dims_and_tensors::schema>();
}

// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor _sparse_coo_tensor_with_dims_and_tensors::call(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create__sparse_coo_tensor_with_dims_and_tensors_typed_handle();
    return op.call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False, bool? is_coalesced=None) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor _sparse_coo_tensor_with_dims_and_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
    
    static auto op = create__sparse_coo_tensor_with_dims_and_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
}

// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_::schema> create_sparse_resize__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_::name, sparse_resize_::overload_name)
      .typed<sparse_resize_::schema>();
}

// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
// Entry point: dispatches through a typed handle cached in a function-local static.
// In-place op per the schema's (a!) annotation; returns a reference to self.
const at::Tensor & sparse_resize_::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize__typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
const at::Tensor & sparse_resize_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_and_clear_::schema> create_sparse_resize_and_clear__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_and_clear_::name, sparse_resize_and_clear_::overload_name)
      .typed<sparse_resize_and_clear_::schema>();
}

// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
// Entry point: dispatches through a typed handle cached in a function-local static.
// In-place op per the schema's (a!) annotation; returns a reference to self.
const at::Tensor & sparse_resize_and_clear_::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize_and_clear__typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
const at::Tensor & sparse_resize_and_clear_::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize_and_clear__typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_mask::schema> create_sparse_mask_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_mask::name, sparse_mask::overload_name)
      .typed<sparse_mask::schema>();
}

// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor sparse_mask::call(const at::Tensor & self, const at::Tensor & mask) {
    
    static auto op = create_sparse_mask_typed_handle();
    return op.call(self, mask);
}

// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor sparse_mask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) {
    
    static auto op = create_sparse_mask_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask);
}

// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mask_projection::schema> create__sparse_mask_projection_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mask_projection::name, _sparse_mask_projection::overload_name)
      .typed<_sparse_mask_projection::schema>();
}

// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor _sparse_mask_projection::call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
    
    static auto op = create__sparse_mask_projection_typed_handle();
    return op.call(self, mask, accumulate_matches);
}

// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor _sparse_mask_projection::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
    
    static auto op = create__sparse_mask_projection_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, accumulate_matches);
}

// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_to_cpu::schema> create__to_cpu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_cpu::name, _to_cpu::overload_name)
      .typed<_to_cpu::schema>();
}

// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
// Entry point: dispatches through a typed handle cached in a function-local static.
::std::vector<at::Tensor> _to_cpu::call(at::TensorList tensors) {
    
    static auto op = create__to_cpu_typed_handle();
    return op.call(tensors);
}

// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
::std::vector<at::Tensor> _to_cpu::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    
    static auto op = create__to_cpu_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<to_dense::schema> create_to_dense_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_dense::name, to_dense::overload_name)
      .typed<to_dense::schema>();
}

// aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor to_dense::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    
    static auto op = create_to_dense_typed_handle();
    return op.call(self, dtype, masked_grad);
}

// aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor to_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    
    static auto op = create_to_dense_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, masked_grad);
}

// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_to_dense::schema> create__to_dense_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_dense::name, _to_dense::overload_name)
      .typed<_to_dense::schema>();
}

// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor _to_dense::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    
    static auto op = create__to_dense_typed_handle();
    return op.call(self, dtype, masked_grad);
}

// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor _to_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
    
    static auto op = create__to_dense_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, masked_grad);
}

// aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<to_dense_backward::schema> create_to_dense_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_dense_backward::name, to_dense_backward::overload_name)
      .typed<to_dense_backward::schema>();
}

// aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor to_dense_backward::call(const at::Tensor & grad, const at::Tensor & input, ::std::optional<bool> masked_grad) {
    
    static auto op = create_to_dense_backward_typed_handle();
    return op.call(grad, input, masked_grad);
}

// aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor to_dense_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, ::std::optional<bool> masked_grad) {
    
    static auto op = create_to_dense_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input, masked_grad);
}

// aten::sparse_dim(Tensor self) -> int
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_dim::schema> create_sparse_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_dim::name, sparse_dim::overload_name)
      .typed<sparse_dim::schema>();
}

// aten::sparse_dim(Tensor self) -> int
// Entry point: dispatches through a typed handle cached in a function-local static.
int64_t sparse_dim::call(const at::Tensor & self) {
    
    static auto op = create_sparse_dim_typed_handle();
    return op.call(self);
}

// aten::sparse_dim(Tensor self) -> int
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
int64_t sparse_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_sparse_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_dimI(Tensor self) -> int
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_dimI::schema> create__dimI_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dimI::name, _dimI::overload_name)
      .typed<_dimI::schema>();
}

// aten::_dimI(Tensor self) -> int
// Entry point: dispatches through a typed handle cached in a function-local static.
int64_t _dimI::call(const at::Tensor & self) {
    
    static auto op = create__dimI_typed_handle();
    return op.call(self);
}

// aten::_dimI(Tensor self) -> int
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
int64_t _dimI::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__dimI_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::dense_dim(Tensor self) -> int
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<dense_dim::schema> create_dense_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dense_dim::name, dense_dim::overload_name)
      .typed<dense_dim::schema>();
}

// aten::dense_dim(Tensor self) -> int
// Entry point: dispatches through a typed handle cached in a function-local static.
int64_t dense_dim::call(const at::Tensor & self) {
    
    static auto op = create_dense_dim_typed_handle();
    return op.call(self);
}

// aten::dense_dim(Tensor self) -> int
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
int64_t dense_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_dense_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_dimV(Tensor self) -> int
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_dimV::schema> create__dimV_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_dimV::name, _dimV::overload_name)
      .typed<_dimV::schema>();
}

// aten::_dimV(Tensor self) -> int
// Entry point: dispatches through a typed handle cached in a function-local static.
int64_t _dimV::call(const at::Tensor & self) {
    
    static auto op = create__dimV_typed_handle();
    return op.call(self);
}

// aten::_dimV(Tensor self) -> int
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
int64_t _dimV::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__dimV_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_nnz(Tensor self) -> int
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_nnz::schema> create__nnz_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nnz::name, _nnz::overload_name)
      .typed<_nnz::schema>();
}

// aten::_nnz(Tensor self) -> int
// Entry point: dispatches through a typed handle cached in a function-local static.
int64_t _nnz::call(const at::Tensor & self) {
    
    static auto op = create__nnz_typed_handle();
    return op.call(self);
}

// aten::_nnz(Tensor self) -> int
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
int64_t _nnz::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__nnz_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::coalesce(Tensor(a) self) -> Tensor(a)
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<coalesce::schema> create_coalesce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(coalesce::name, coalesce::overload_name)
      .typed<coalesce::schema>();
}

// aten::coalesce(Tensor(a) self) -> Tensor(a)
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor coalesce::call(const at::Tensor & self) {
    
    static auto op = create_coalesce_typed_handle();
    return op.call(self);
}

// aten::coalesce(Tensor(a) self) -> Tensor(a)
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor coalesce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_coalesce_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_coalesce(Tensor self) -> Tensor
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesce::schema> create__coalesce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_coalesce::name, _coalesce::overload_name)
      .typed<_coalesce::schema>();
}

// aten::_coalesce(Tensor self) -> Tensor
// Entry point: dispatches through a typed handle cached in a function-local static.
at::Tensor _coalesce::call(const at::Tensor & self) {
    
    static auto op = create__coalesce_typed_handle();
    return op.call(self);
}

// aten::_coalesce(Tensor self) -> Tensor
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
at::Tensor _coalesce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__coalesce_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::is_coalesced(Tensor self) -> bool
// Looks up this op's schema in the dispatcher and returns it as a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<is_coalesced::schema> create_is_coalesced_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(is_coalesced::name, is_coalesced::overload_name)
      .typed<is_coalesced::schema>();
}

// aten::is_coalesced(Tensor self) -> bool
// Entry point: dispatches through a typed handle cached in a function-local static.
bool is_coalesced::call(const at::Tensor & self) {
    
    static auto op = create_is_coalesced_typed_handle();
    return op.call(self);
}

// aten::is_coalesced(Tensor self) -> bool
// Re-entry point: dispatches with an explicit, caller-supplied dispatch key set.
bool is_coalesced::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_is_coalesced_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_indices(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_indices::schema> create__indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_indices::name, _indices::overload_name)
      .typed<_indices::schema>();
}

// aten::_indices(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _indices::call(const at::Tensor & self) {
    
    static auto op = create__indices_typed_handle();
    return op.call(self);
}

// aten::_indices(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_values(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_values::schema> create__values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_values::name, _values::overload_name)
      .typed<_values::schema>();
}

// aten::_values(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _values::call(const at::Tensor & self) {
    
    static auto op = create__values_typed_handle();
    return op.call(self);
}

// aten::_values(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create__values_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
// Resolves the dispatcher entry for this in-place op and returns a schema-typed
// handle (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_coalesced_::schema> create__coalesced__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_coalesced_::name, _coalesced_::overload_name)
      .typed<_coalesced_::schema>();
}

// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
// Entry point: forwards to the lazily-created cached handle; returns `self` by
// reference per the in-place (a!) schema.
at::Tensor & _coalesced_::call(at::Tensor & self, bool coalesced) {
    
    static auto op = create__coalesced__typed_handle();
    return op.call(self, coalesced);
}

// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & _coalesced_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced) {
    
    static auto op = create__coalesced__typed_handle();
    return op.redispatch(dispatchKeySet, self, coalesced);
}

// aten::indices(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<indices::schema> create_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(indices::name, indices::overload_name)
      .typed<indices::schema>();
}

// aten::indices(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor indices::call(const at::Tensor & self) {
    
    static auto op = create_indices_typed_handle();
    return op.call(self);
}

// aten::indices(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::values(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<values::schema> create_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(values::name, values::overload_name)
      .typed<values::schema>();
}

// aten::values(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor values::call(const at::Tensor & self) {
    
    static auto op = create_values_typed_handle();
    return op.call(self);
}

// aten::values(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_values_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::crow_indices(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<crow_indices::schema> create_crow_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(crow_indices::name, crow_indices::overload_name)
      .typed<crow_indices::schema>();
}

// aten::crow_indices(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor crow_indices::call(const at::Tensor & self) {
    
    static auto op = create_crow_indices_typed_handle();
    return op.call(self);
}

// aten::crow_indices(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor crow_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_crow_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::col_indices(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<col_indices::schema> create_col_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(col_indices::name, col_indices::overload_name)
      .typed<col_indices::schema>();
}

// aten::col_indices(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor col_indices::call(const at::Tensor & self) {
    
    static auto op = create_col_indices_typed_handle();
    return op.call(self);
}

// aten::col_indices(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor col_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_col_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<ccol_indices::schema> create_ccol_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ccol_indices::name, ccol_indices::overload_name)
      .typed<ccol_indices::schema>();
}

// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor ccol_indices::call(const at::Tensor & self) {
    
    static auto op = create_ccol_indices_typed_handle();
    return op.call(self);
}

// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor ccol_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_ccol_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::row_indices(Tensor(a) self) -> Tensor(a)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<row_indices::schema> create_row_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_indices::name, row_indices::overload_name)
      .typed<row_indices::schema>();
}

// aten::row_indices(Tensor(a) self) -> Tensor(a)
// Entry point: forwards to the lazily-created cached handle.
at::Tensor row_indices::call(const at::Tensor & self) {
    
    static auto op = create_row_indices_typed_handle();
    return op.call(self);
}

// aten::row_indices(Tensor(a) self) -> Tensor(a)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor row_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_row_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this out-variant op and returns a
// schema-typed handle (C10_NOINLINE keeps the cold lookup out of the fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<hspmm_out::schema> create_hspmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hspmm_out::name, hspmm_out::overload_name)
      .typed<hspmm_out::schema>();
}

// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: forwards to the lazily-created cached handle; returns `out` by
// reference per the (a!) schema.
at::Tensor & hspmm_out::call(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
    
    static auto op = create_hspmm_out_typed_handle();
    return op.call(mat1, mat2, out);
}

// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & hspmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
    
    static auto op = create_hspmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, mat1, mat2, out);
}

// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<hspmm::schema> create_hspmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hspmm::name, hspmm::overload_name)
      .typed<hspmm::schema>();
}

// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor hspmm::call(const at::Tensor & mat1, const at::Tensor & mat2) {
    
    static auto op = create_hspmm_typed_handle();
    return op.call(mat1, mat2);
}

// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor hspmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2) {
    
    static auto op = create_hspmm_typed_handle();
    return op.redispatch(dispatchKeySet, mat1, mat2);
}

// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
// Resolves the dispatcher entry for this in-place op and returns a schema-typed
// handle (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<copy_sparse_to_sparse_::schema> create_copy_sparse_to_sparse__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_sparse_to_sparse_::name, copy_sparse_to_sparse_::overload_name)
      .typed<copy_sparse_to_sparse_::schema>();
}

// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
// Entry point: forwards to the lazily-created cached handle; returns `self` by
// reference per the in-place (a!) schema.
at::Tensor & copy_sparse_to_sparse_::call(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    
    static auto op = create_copy_sparse_to_sparse__typed_handle();
    return op.call(self, src, non_blocking);
}

// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & copy_sparse_to_sparse_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    
    static auto op = create_copy_sparse_to_sparse__typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking);
}

// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
// Resolves the dispatcher entry for this overload and returns a schema-typed
// handle (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<unbind_int::schema> create_unbind_int_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unbind_int::name, unbind_int::overload_name)
      .typed<unbind_int::schema>();
}

// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
// Entry point: forwards to the lazily-created cached handle.
::std::vector<at::Tensor> unbind_int::call(const at::Tensor & self, int64_t dim) {
    
    static auto op = create_unbind_int_typed_handle();
    return op.call(self, dim);
}

// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
::std::vector<at::Tensor> unbind_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    
    static auto op = create_unbind_int_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
// Resolves the dispatcher entry for this overload and returns a schema-typed
// handle (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<unbind_Dimname::schema> create_unbind_Dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unbind_Dimname::name, unbind_Dimname::overload_name)
      .typed<unbind_Dimname::schema>();
}

// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
// Entry point: forwards to the lazily-created cached handle.
::std::vector<at::Tensor> unbind_Dimname::call(const at::Tensor & self, at::Dimname dim) {
    
    static auto op = create_unbind_Dimname_typed_handle();
    return op.call(self, dim);
}

// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
::std::vector<at::Tensor> unbind_Dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
    
    static auto op = create_unbind_Dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
// Resolves the dispatcher entry for this overload and returns a schema-typed
// handle (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_sparse_dim::schema> create_to_sparse_sparse_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_sparse_dim::name, to_sparse_sparse_dim::overload_name)
      .typed<to_sparse_sparse_dim::schema>();
}

// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_sparse_sparse_dim::call(const at::Tensor & self, int64_t sparse_dim) {
    
    static auto op = create_to_sparse_sparse_dim_typed_handle();
    return op.call(self, sparse_dim);
}

// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_sparse_sparse_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim) {
    
    static auto op = create_to_sparse_sparse_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, sparse_dim);
}

// aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
// Resolves the dispatcher entry for this overload and returns a schema-typed
// handle (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_sparse_dim::schema> create__to_sparse_sparse_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_sparse_dim::name, _to_sparse_sparse_dim::overload_name)
      .typed<_to_sparse_sparse_dim::schema>();
}

// aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _to_sparse_sparse_dim::call(const at::Tensor & self, int64_t sparse_dim) {
    
    static auto op = create__to_sparse_sparse_dim_typed_handle();
    return op.call(self, sparse_dim);
}

// aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _to_sparse_sparse_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim) {
    
    static auto op = create__to_sparse_sparse_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, sparse_dim);
}

// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse::schema> create_to_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse::name, to_sparse::overload_name)
      .typed<to_sparse::schema>();
}

// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_sparse::call(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_typed_handle();
    return op.call(self, layout, blocksize, dense_dim);
}

// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, layout, blocksize, dense_dim);
}

// aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse::schema> create__to_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse::name, _to_sparse::overload_name)
      .typed<_to_sparse::schema>();
}

// aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _to_sparse::call(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_typed_handle();
    return op.call(self, layout, blocksize, dense_dim);
}

// aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _to_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, layout, blocksize, dense_dim);
}

// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_csr::schema> create_to_sparse_csr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_csr::name, to_sparse_csr::overload_name)
      .typed<to_sparse_csr::schema>();
}

// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_sparse_csr::call(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_csr_typed_handle();
    return op.call(self, dense_dim);
}

// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_sparse_csr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_csr_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim);
}

// aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_csr::schema> create__to_sparse_csr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_csr::name, _to_sparse_csr::overload_name)
      .typed<_to_sparse_csr::schema>();
}

// aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _to_sparse_csr::call(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_csr_typed_handle();
    return op.call(self, dense_dim);
}

// aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _to_sparse_csr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_csr_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim);
}

// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_csc::schema> create_to_sparse_csc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_csc::name, to_sparse_csc::overload_name)
      .typed<to_sparse_csc::schema>();
}

// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_sparse_csc::call(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_csc_typed_handle();
    return op.call(self, dense_dim);
}

// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_sparse_csc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_csc_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim);
}

// aten::_to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_csc::schema> create__to_sparse_csc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_csc::name, _to_sparse_csc::overload_name)
      .typed<_to_sparse_csc::schema>();
}

// aten::_to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _to_sparse_csc::call(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_csc_typed_handle();
    return op.call(self, dense_dim);
}

// aten::_to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _to_sparse_csc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_csc_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim);
}

// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_bsr::schema> create_to_sparse_bsr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_bsr::name, to_sparse_bsr::overload_name)
      .typed<to_sparse_bsr::schema>();
}

// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_sparse_bsr::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_bsr_typed_handle();
    return op.call(self, blocksize, dense_dim);
}

// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_sparse_bsr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_bsr_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim);
}

// aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_bsr::schema> create__to_sparse_bsr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_bsr::name, _to_sparse_bsr::overload_name)
      .typed<_to_sparse_bsr::schema>();
}

// aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _to_sparse_bsr::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_bsr_typed_handle();
    return op.call(self, blocksize, dense_dim);
}

// aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _to_sparse_bsr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_bsr_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim);
}

// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_sparse_bsc::schema> create_to_sparse_bsc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_sparse_bsc::name, to_sparse_bsc::overload_name)
      .typed<to_sparse_bsc::schema>();
}

// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_sparse_bsc::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_bsc_typed_handle();
    return op.call(self, blocksize, dense_dim);
}

// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_sparse_bsc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create_to_sparse_bsc_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim);
}

// aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_bsc::schema> create__to_sparse_bsc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_bsc::name, _to_sparse_bsc::overload_name)
      .typed<_to_sparse_bsc::schema>();
}

// aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor _to_sparse_bsc::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_bsc_typed_handle();
    return op.call(self, blocksize, dense_dim);
}

// aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor _to_sparse_bsc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
    
    static auto op = create__to_sparse_bsc_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim);
}

// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_semi_structured::schema> create__to_sparse_semi_structured_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_semi_structured::name, _to_sparse_semi_structured::overload_name)
      .typed<_to_sparse_semi_structured::schema>();
}

// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
// Entry point: forwards to the lazily-created cached handle; returns the
// two-tensor result as a tuple per the schema.
::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured::call(const at::Tensor & dense) {
    
    static auto op = create__to_sparse_semi_structured_typed_handle();
    return op.call(dense);
}

// aten::_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dense) {
    
    static auto op = create__to_sparse_semi_structured_typed_handle();
    return op.redispatch(dispatchKeySet, dense);
}

// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_mkldnn::schema> create_to_mkldnn_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_mkldnn::name, to_mkldnn::overload_name)
      .typed<to_mkldnn::schema>();
}

// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_mkldnn::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_to_mkldnn_typed_handle();
    return op.call(self, dtype);
}

// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_mkldnn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_to_mkldnn_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype);
}

// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv2d_weight::schema> create_mkldnn_reorder_conv2d_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv2d_weight::name, mkldnn_reorder_conv2d_weight::overload_name)
      .typed<mkldnn_reorder_conv2d_weight::schema>();
}

// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle (SymInt schema —
// all size-like arguments are symbolic-int capable).
at::Tensor mkldnn_reorder_conv2d_weight::call(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
    
    static auto op = create_mkldnn_reorder_conv2d_weight_typed_handle();
    return op.call(self, padding, stride, dilation, groups, input_size);
}

// aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor mkldnn_reorder_conv2d_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
    
    static auto op = create_mkldnn_reorder_conv2d_weight_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size);
}

// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv3d_weight::schema> create_mkldnn_reorder_conv3d_weight_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv3d_weight::name, mkldnn_reorder_conv3d_weight::overload_name)
      .typed<mkldnn_reorder_conv3d_weight::schema>();
}

// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
// Entry point: forwards to the lazily-created cached handle (SymInt schema —
// all size-like arguments are symbolic-int capable).
at::Tensor mkldnn_reorder_conv3d_weight::call(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
    
    static auto op = create_mkldnn_reorder_conv3d_weight_typed_handle();
    return op.call(self, padding, stride, dilation, groups, input_size);
}

// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor mkldnn_reorder_conv3d_weight::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
    
    static auto op = create_mkldnn_reorder_conv3d_weight_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size);
}

// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<to_mkldnn_backward::schema> create_to_mkldnn_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_mkldnn_backward::name, to_mkldnn_backward::overload_name)
      .typed<to_mkldnn_backward::schema>();
}

// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor to_mkldnn_backward::call(const at::Tensor & grad, const at::Tensor & input) {
    
    static auto op = create_to_mkldnn_backward_typed_handle();
    return op.call(grad, input);
}

// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor to_mkldnn_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input) {
    
    static auto op = create_to_mkldnn_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input);
}

// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_dynamic::schema> create_quantize_per_tensor_dynamic_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_dynamic::name, quantize_per_tensor_dynamic::overload_name)
      .typed<quantize_per_tensor_dynamic::schema>();
}

// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
// Entry point: forwards to the lazily-created cached handle.
at::Tensor quantize_per_tensor_dynamic::call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
    
    static auto op = create_quantize_per_tensor_dynamic_typed_handle();
    return op.call(self, dtype, reduce_range);
}

// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor quantize_per_tensor_dynamic::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
    
    static auto op = create_quantize_per_tensor_dynamic_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, reduce_range);
}

// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
// Resolves the dispatcher entry for this op and returns a schema-typed handle
// (C10_NOINLINE keeps the cold lookup out of the wrappers' fast paths).
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor::schema> create_quantize_per_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor::name, quantize_per_tensor::overload_name)
      .typed<quantize_per_tensor::schema>();
}

// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
// Entry point: forwards to the lazily-created cached handle (schema `float`
// maps to C++ double, `int` to int64_t).
at::Tensor quantize_per_tensor::call(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
    
    static auto op = create_quantize_per_tensor_typed_handle();
    return op.call(self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
// Same as call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor quantize_per_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
    
    static auto op = create_quantize_per_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensor_qparams::schema> create_quantize_per_tensor_tensor_qparams_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_tensor_qparams::name, quantize_per_tensor_tensor_qparams::overload_name)
      .typed<quantize_per_tensor_tensor_qparams::schema>();
}

// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
at::Tensor quantize_per_tensor_tensor_qparams::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_quantize_per_tensor_tensor_qparams_typed_handle();
    return op.call(self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
at::Tensor quantize_per_tensor_tensor_qparams::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_quantize_per_tensor_tensor_qparams_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, dtype);
}

// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensors::schema> create_quantize_per_tensor_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_tensors::name, quantize_per_tensor_tensors::overload_name)
      .typed<quantize_per_tensor_tensors::schema>();
}

// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
::std::vector<at::Tensor> quantize_per_tensor_tensors::call(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_quantize_per_tensor_tensors_typed_handle();
    return op.call(tensors, scales, zero_points, dtype);
}

// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
::std::vector<at::Tensor> quantize_per_tensor_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_quantize_per_tensor_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, scales, zero_points, dtype);
}

// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_channel::schema> create_quantize_per_channel_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_channel::name, quantize_per_channel::overload_name)
      .typed<quantize_per_channel::schema>();
}

// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
at::Tensor quantize_per_channel::call(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_quantize_per_channel_typed_handle();
    return op.call(self, scales, zero_points, axis, dtype);
}

// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
at::Tensor quantize_per_channel::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_quantize_per_channel_typed_handle();
    return op.redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype);
}

// aten::dequantize.self(Tensor self) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_self::schema> create_dequantize_self_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_self::name, dequantize_self::overload_name)
      .typed<dequantize_self::schema>();
}

// aten::dequantize.self(Tensor self) -> Tensor
at::Tensor dequantize_self::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_dequantize_self_typed_handle();
    return op.call(self);
}

// aten::dequantize.self(Tensor self) -> Tensor
at::Tensor dequantize_self::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_dequantize_self_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_tensors::schema> create_dequantize_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_tensors::name, dequantize_tensors::overload_name)
      .typed<dequantize_tensors::schema>();
}

// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> dequantize_tensors::call(at::TensorList tensors) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_dequantize_tensors_typed_handle();
    return op.call(tensors);
}

// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> dequantize_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_dequantize_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// aten::q_scale(Tensor self) -> float
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<q_scale::schema> create_q_scale_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_scale::name, q_scale::overload_name)
      .typed<q_scale::schema>();
}

// aten::q_scale(Tensor self) -> float
double q_scale::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_q_scale_typed_handle();
    return op.call(self);
}

// aten::q_scale(Tensor self) -> float
double q_scale::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_q_scale_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::q_zero_point(Tensor self) -> int
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<q_zero_point::schema> create_q_zero_point_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_zero_point::name, q_zero_point::overload_name)
      .typed<q_zero_point::schema>();
}

// aten::q_zero_point(Tensor self) -> int
int64_t q_zero_point::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_q_zero_point_typed_handle();
    return op.call(self);
}

// aten::q_zero_point(Tensor self) -> int
int64_t q_zero_point::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_q_zero_point_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::q_per_channel_scales(Tensor self) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_scales::schema> create_q_per_channel_scales_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_scales::name, q_per_channel_scales::overload_name)
      .typed<q_per_channel_scales::schema>();
}

// aten::q_per_channel_scales(Tensor self) -> Tensor
at::Tensor q_per_channel_scales::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_q_per_channel_scales_typed_handle();
    return op.call(self);
}

// aten::q_per_channel_scales(Tensor self) -> Tensor
at::Tensor q_per_channel_scales::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_q_per_channel_scales_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::q_per_channel_zero_points(Tensor self) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_zero_points::schema> create_q_per_channel_zero_points_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_zero_points::name, q_per_channel_zero_points::overload_name)
      .typed<q_per_channel_zero_points::schema>();
}

// aten::q_per_channel_zero_points(Tensor self) -> Tensor
at::Tensor q_per_channel_zero_points::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_q_per_channel_zero_points_typed_handle();
    return op.call(self);
}

// aten::q_per_channel_zero_points(Tensor self) -> Tensor
at::Tensor q_per_channel_zero_points::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_q_per_channel_zero_points_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::q_per_channel_axis(Tensor self) -> int
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_axis::schema> create_q_per_channel_axis_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_axis::name, q_per_channel_axis::overload_name)
      .typed<q_per_channel_axis::schema>();
}

// aten::q_per_channel_axis(Tensor self) -> int
int64_t q_per_channel_axis::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_q_per_channel_axis_typed_handle();
    return op.call(self);
}

// aten::q_per_channel_axis(Tensor self) -> int
int64_t q_per_channel_axis::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_q_per_channel_axis_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::int_repr(Tensor self) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<int_repr::schema> create_int_repr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(int_repr::name, int_repr::overload_name)
      .typed<int_repr::schema>();
}

// aten::int_repr(Tensor self) -> Tensor
at::Tensor int_repr::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_int_repr_typed_handle();
    return op.call(self);
}

// aten::int_repr(Tensor self) -> Tensor
at::Tensor int_repr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_int_repr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_tensor_quantized_tensor::schema> create__make_per_tensor_quantized_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_tensor_quantized_tensor::name, _make_per_tensor_quantized_tensor::overload_name)
      .typed<_make_per_tensor_quantized_tensor::schema>();
}

// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
at::Tensor _make_per_tensor_quantized_tensor::call(const at::Tensor & self, double scale, int64_t zero_point) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create__make_per_tensor_quantized_tensor_typed_handle();
    return op.call(self, scale, zero_point);
}

// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
at::Tensor _make_per_tensor_quantized_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create__make_per_tensor_quantized_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point);
}

// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_channel_quantized_tensor::schema> create__make_per_channel_quantized_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_channel_quantized_tensor::name, _make_per_channel_quantized_tensor::overload_name)
      .typed<_make_per_channel_quantized_tensor::schema>();
}

// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
at::Tensor _make_per_channel_quantized_tensor::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create__make_per_channel_quantized_tensor_typed_handle();
    return op.call(self, scale, zero_point, axis);
}

// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
at::Tensor _make_per_channel_quantized_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create__make_per_channel_quantized_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis);
}

// aten::qscheme(Tensor self) -> QScheme
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<qscheme::schema> create_qscheme_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(qscheme::name, qscheme::overload_name)
      .typed<qscheme::schema>();
}

// aten::qscheme(Tensor self) -> QScheme
at::QScheme qscheme::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_qscheme_typed_handle();
    return op.call(self);
}

// aten::qscheme(Tensor self) -> QScheme
at::QScheme qscheme::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_qscheme_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine::schema> create_fake_quantize_per_tensor_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_tensor_affine::name, fake_quantize_per_tensor_affine::overload_name)
      .typed<fake_quantize_per_tensor_affine::schema>();
}

// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine::call(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_tensor_affine_typed_handle();
    return op.call(self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_tensor_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_tensor_qparams::schema> create_fake_quantize_per_tensor_affine_tensor_qparams_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_tensor_affine_tensor_qparams::name, fake_quantize_per_tensor_affine_tensor_qparams::overload_name)
      .typed<fake_quantize_per_tensor_affine_tensor_qparams::schema>();
}

// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_tensor_affine_tensor_qparams_typed_handle();
    return op.call(self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_tensor_affine_tensor_qparams_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_cachemask::schema> create_fake_quantize_per_tensor_affine_cachemask_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_tensor_affine_cachemask::name, fake_quantize_per_tensor_affine_cachemask::overload_name)
      .typed<fake_quantize_per_tensor_affine_cachemask::schema>();
}

// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask::call(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_tensor_affine_cachemask_typed_handle();
    return op.call(self, scale, zero_point, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_tensor_affine_cachemask_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::schema> create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::name, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams::overload_name)
      .typed<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::schema>();
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_typed_handle();
    return op.call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
}

// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_cachemask_backward::schema> create_fake_quantize_per_tensor_affine_cachemask_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_tensor_affine_cachemask_backward::name, fake_quantize_per_tensor_affine_cachemask_backward::overload_name)
      .typed<fake_quantize_per_tensor_affine_cachemask_backward::schema>();
}

// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward::call(const at::Tensor & grad, const at::Tensor & mask) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_tensor_affine_cachemask_backward_typed_handle();
    return op.call(grad, mask);
}

// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_tensor_affine_cachemask_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, mask);
}

// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_tensor_affine::schema> create__fake_quantize_learnable_per_tensor_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_tensor_affine::name, _fake_quantize_learnable_per_tensor_affine::overload_name)
      .typed<_fake_quantize_learnable_per_tensor_affine::schema>();
}

// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_tensor_affine::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create__fake_quantize_learnable_per_tensor_affine_typed_handle();
    return op.call(self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_tensor_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create__fake_quantize_learnable_per_tensor_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_tensor_affine_backward::schema> create__fake_quantize_learnable_per_tensor_affine_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_tensor_affine_backward::name, _fake_quantize_learnable_per_tensor_affine_backward::overload_name)
      .typed<_fake_quantize_learnable_per_tensor_affine_backward::schema>();
}

// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create__fake_quantize_learnable_per_tensor_affine_backward_typed_handle();
    return op.call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create__fake_quantize_learnable_per_tensor_affine_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
}

// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine::schema> create_fake_quantize_per_channel_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_channel_affine::name, fake_quantize_per_channel_affine::overload_name)
      .typed<fake_quantize_per_channel_affine::schema>();
}

// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_channel_affine::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_channel_affine_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max);
}

// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
at::Tensor fake_quantize_per_channel_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_channel_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max);
}

// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine_cachemask::schema> create_fake_quantize_per_channel_affine_cachemask_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_channel_affine_cachemask::name, fake_quantize_per_channel_affine_cachemask::overload_name)
      .typed<fake_quantize_per_channel_affine_cachemask::schema>();
}

// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_channel_affine_cachemask_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max);
}

// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_channel_affine_cachemask_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max);
}

// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine_cachemask_backward::schema> create_fake_quantize_per_channel_affine_cachemask_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fake_quantize_per_channel_affine_cachemask_backward::name, fake_quantize_per_channel_affine_cachemask_backward::overload_name)
      .typed<fake_quantize_per_channel_affine_cachemask_backward::schema>();
}

// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
at::Tensor fake_quantize_per_channel_affine_cachemask_backward::call(const at::Tensor & grad, const at::Tensor & mask) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create_fake_quantize_per_channel_affine_cachemask_backward_typed_handle();
    return op.call(grad, mask);
}

// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
at::Tensor fake_quantize_per_channel_affine_cachemask_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create_fake_quantize_per_channel_affine_cachemask_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, mask);
}

// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
// One-time lookup of the operator in the dispatcher; C10_NOINLINE keeps this
// cold path out of the fast paths in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_channel_affine::schema> create__fake_quantize_learnable_per_channel_affine_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fake_quantize_learnable_per_channel_affine::name, _fake_quantize_learnable_per_channel_affine::overload_name)
      .typed<_fake_quantize_learnable_per_channel_affine::schema>();
}

// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_channel_affine::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Handle is resolved once and cached (thread-safe C++11 magic static).
    static auto op = create__fake_quantize_learnable_per_channel_affine_typed_handle();
    return op.call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
at::Tensor _fake_quantize_learnable_per_channel_affine::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    // Same cached handle, but dispatches with the caller-supplied key set.
    static auto op = create__fake_quantize_learnable_per_channel_affine_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_channel_affine_backward::schema> create__fake_quantize_learnable_per_channel_affine_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fake_quantize_learnable_per_channel_affine_backward::name, _fake_quantize_learnable_per_channel_affine_backward::overload_name)
      .typed<_fake_quantize_learnable_per_channel_affine_backward::schema>();
}

// Dispatches aten::_fake_quantize_learnable_per_channel_affine_backward through the cached operator handle.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    static const auto handle = create__fake_quantize_learnable_per_channel_affine_backward_typed_handle();
    return handle.call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
    static const auto handle = create__fake_quantize_learnable_per_channel_affine_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
}

// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<fused_moving_avg_obs_fake_quant::schema> create_fused_moving_avg_obs_fake_quant_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fused_moving_avg_obs_fake_quant::name, fused_moving_avg_obs_fake_quant::overload_name)
      .typed<fused_moving_avg_obs_fake_quant::schema>();
}

// Dispatches aten::fused_moving_avg_obs_fake_quant through the cached operator handle.
at::Tensor fused_moving_avg_obs_fake_quant::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    static const auto handle = create_fused_moving_avg_obs_fake_quant_typed_handle();
    return handle.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor fused_moving_avg_obs_fake_quant::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    static const auto handle = create_fused_moving_avg_obs_fake_quant_typed_handle();
    return handle.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_moving_avg_obs_fq_helper::schema> create__fused_moving_avg_obs_fq_helper_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_moving_avg_obs_fq_helper::name, _fused_moving_avg_obs_fq_helper::overload_name)
      .typed<_fused_moving_avg_obs_fq_helper::schema>();
}

// Dispatches aten::_fused_moving_avg_obs_fq_helper through the cached operator handle.
::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    static const auto handle = create__fused_moving_avg_obs_fq_helper_typed_handle();
    return handle.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    static const auto handle = create__fused_moving_avg_obs_fq_helper_typed_handle();
    return handle.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_choose_qparams_per_tensor::schema> create__choose_qparams_per_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_choose_qparams_per_tensor::name, _choose_qparams_per_tensor::overload_name)
      .typed<_choose_qparams_per_tensor::schema>();
}

// Dispatches aten::_choose_qparams_per_tensor through the cached operator handle.
::std::tuple<double,int64_t> _choose_qparams_per_tensor::call(const at::Tensor & self, bool reduce_range) {
    static const auto handle = create__choose_qparams_per_tensor_typed_handle();
    return handle.call(self, reduce_range);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<double,int64_t> _choose_qparams_per_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool reduce_range) {
    static const auto handle = create__choose_qparams_per_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, reduce_range);
}

// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_saturate_weight_to_fp16::schema> create__saturate_weight_to_fp16_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_saturate_weight_to_fp16::name, _saturate_weight_to_fp16::overload_name)
      .typed<_saturate_weight_to_fp16::schema>();
}

// Dispatches aten::_saturate_weight_to_fp16 through the cached operator handle.
at::Tensor _saturate_weight_to_fp16::call(const at::Tensor & weight) {
    static const auto handle = create__saturate_weight_to_fp16_typed_handle();
    return handle.call(weight);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _saturate_weight_to_fp16::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight) {
    static const auto handle = create__saturate_weight_to_fp16_typed_handle();
    return handle.redispatch(dispatchKeySet, weight);
}

// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<choose_qparams_optimized::schema> create_choose_qparams_optimized_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(choose_qparams_optimized::name, choose_qparams_optimized::overload_name)
      .typed<choose_qparams_optimized::schema>();
}

// Dispatches aten::choose_qparams_optimized through the cached operator handle.
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized::call(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
    static const auto handle = create_choose_qparams_optimized_typed_handle();
    return handle.call(input, numel, n_bins, ratio, bit_width);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
    static const auto handle = create_choose_qparams_optimized_typed_handle();
    return handle.redispatch(dispatchKeySet, input, numel, n_bins, ratio, bit_width);
}

// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_autocast_to_reduced_precision::schema> create__autocast_to_reduced_precision_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_autocast_to_reduced_precision::name, _autocast_to_reduced_precision::overload_name)
      .typed<_autocast_to_reduced_precision::schema>();
}

// Dispatches aten::_autocast_to_reduced_precision through the cached operator handle.
at::Tensor _autocast_to_reduced_precision::call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
    static const auto handle = create__autocast_to_reduced_precision_typed_handle();
    return handle.call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _autocast_to_reduced_precision::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
    static const auto handle = create__autocast_to_reduced_precision_typed_handle();
    return handle.redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
}

// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_autocast_to_full_precision::schema> create__autocast_to_full_precision_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_autocast_to_full_precision::name, _autocast_to_full_precision::overload_name)
      .typed<_autocast_to_full_precision::schema>();
}

// Dispatches aten::_autocast_to_full_precision through the cached operator handle.
at::Tensor _autocast_to_full_precision::call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
    static const auto handle = create__autocast_to_full_precision_typed_handle();
    return handle.call(self, cuda_enabled, cpu_enabled);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _autocast_to_full_precision::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
    static const auto handle = create__autocast_to_full_precision_typed_handle();
    return handle.redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled);
}

// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_to_copy::schema> create__to_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_to_copy::name, _to_copy::overload_name)
      .typed<_to_copy::schema>();
}

// Dispatches aten::_to_copy through the cached operator handle.
at::Tensor _to_copy::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create__to_copy_typed_handle();
    return handle.call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor _to_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create__to_copy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, memory_format);
}

// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<to_dtype_layout::schema> create_to_dtype_layout_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(to_dtype_layout::name, to_dtype_layout::overload_name)
      .typed<to_dtype_layout::schema>();
}

// Dispatches aten::to.dtype_layout through the cached operator handle.
at::Tensor to_dtype_layout::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_dtype_layout_typed_handle();
    return handle.call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor to_dtype_layout::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_dtype_layout_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
}

// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<to_device::schema> create_to_device_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(to_device::name, to_device::overload_name)
      .typed<to_device::schema>();
}

// Dispatches aten::to.device through the cached operator handle.
at::Tensor to_device::call(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_device_typed_handle();
    return handle.call(self, device, dtype, non_blocking, copy, memory_format);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor to_device::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_device_typed_handle();
    return handle.redispatch(dispatchKeySet, self, device, dtype, non_blocking, copy, memory_format);
}

// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<to_dtype::schema> create_to_dtype_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(to_dtype::name, to_dtype::overload_name)
      .typed<to_dtype::schema>();
}

// Dispatches aten::to.dtype through the cached operator handle.
at::Tensor to_dtype::call(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_dtype_typed_handle();
    return handle.call(self, dtype, non_blocking, copy, memory_format);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor to_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_dtype_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dtype, non_blocking, copy, memory_format);
}

// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<to_other::schema> create_to_other_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(to_other::name, to_other::overload_name)
      .typed<to_other::schema>();
}

// Dispatches aten::to.other through the cached operator handle.
at::Tensor to_other::call(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_other_typed_handle();
    return handle.call(self, other, non_blocking, copy, memory_format);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor to_other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
    static const auto handle = create_to_other_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, non_blocking, copy, memory_format);
}

// aten::meshgrid(Tensor[] tensors) -> Tensor[]
// aten::meshgrid(Tensor[] tensors) -> Tensor[]
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<meshgrid::schema> create_meshgrid_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(meshgrid::name, meshgrid::overload_name)
      .typed<meshgrid::schema>();
}

// Dispatches aten::meshgrid through the cached operator handle.
::std::vector<at::Tensor> meshgrid::call(at::TensorList tensors) {
    static const auto handle = create_meshgrid_typed_handle();
    return handle.call(tensors);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> meshgrid::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static const auto handle = create_meshgrid_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<meshgrid_indexing::schema> create_meshgrid_indexing_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(meshgrid_indexing::name, meshgrid_indexing::overload_name)
      .typed<meshgrid_indexing::schema>();
}

// Dispatches aten::meshgrid.indexing through the cached operator handle.
::std::vector<at::Tensor> meshgrid_indexing::call(at::TensorList tensors, c10::string_view indexing) {
    static const auto handle = create_meshgrid_indexing_typed_handle();
    return handle.call(tensors, indexing);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> meshgrid_indexing::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing) {
    static const auto handle = create_meshgrid_indexing_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, indexing);
}

// aten::cartesian_prod(Tensor[] tensors) -> Tensor
// aten::cartesian_prod(Tensor[] tensors) -> Tensor
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<cartesian_prod::schema> create_cartesian_prod_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(cartesian_prod::name, cartesian_prod::overload_name)
      .typed<cartesian_prod::schema>();
}

// Dispatches aten::cartesian_prod through the cached operator handle.
at::Tensor cartesian_prod::call(at::TensorList tensors) {
    static const auto handle = create_cartesian_prod_typed_handle();
    return handle.call(tensors);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor cartesian_prod::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    static const auto handle = create_cartesian_prod_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<combinations::schema> create_combinations_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(combinations::name, combinations::overload_name)
      .typed<combinations::schema>();
}

// Dispatches aten::combinations through the cached operator handle.
at::Tensor combinations::call(const at::Tensor & self, int64_t r, bool with_replacement) {
    static const auto handle = create_combinations_typed_handle();
    return handle.call(self, r, with_replacement);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Tensor combinations::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t r, bool with_replacement) {
    static const auto handle = create_combinations_typed_handle();
    return handle.redispatch(dispatchKeySet, self, r, with_replacement);
}

// aten::item(Tensor self) -> Scalar
// aten::item(Tensor self) -> Scalar
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<item::schema> create_item_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(item::name, item::overload_name)
      .typed<item::schema>();
}

// Dispatches aten::item through the cached operator handle.
at::Scalar item::call(const at::Tensor & self) {
    static const auto handle = create_item_typed_handle();
    return handle.call(self);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Scalar item::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static const auto handle = create_item_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Tensor::schema> create_result_type_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(result_type_Tensor::name, result_type_Tensor::overload_name)
      .typed<result_type_Tensor::schema>();
}

// Dispatches aten::result_type.Tensor through the cached operator handle.
at::ScalarType result_type_Tensor::call(const at::Tensor & tensor, const at::Tensor & other) {
    static const auto handle = create_result_type_Tensor_typed_handle();
    return handle.call(tensor, other);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::ScalarType result_type_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Tensor & other) {
    static const auto handle = create_result_type_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, tensor, other);
}

// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Scalar::schema> create_result_type_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(result_type_Scalar::name, result_type_Scalar::overload_name)
      .typed<result_type_Scalar::schema>();
}

// Dispatches aten::result_type.Scalar through the cached operator handle.
at::ScalarType result_type_Scalar::call(const at::Tensor & tensor, const at::Scalar & other) {
    static const auto handle = create_result_type_Scalar_typed_handle();
    return handle.call(tensor, other);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::ScalarType result_type_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Scalar & other) {
    static const auto handle = create_result_type_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, tensor, other);
}

// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Scalar_Tensor::schema> create_result_type_Scalar_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(result_type_Scalar_Tensor::name, result_type_Scalar_Tensor::overload_name)
      .typed<result_type_Scalar_Tensor::schema>();
}

// Dispatches aten::result_type.Scalar_Tensor through the cached operator handle.
at::ScalarType result_type_Scalar_Tensor::call(const at::Scalar & scalar, const at::Tensor & tensor) {
    static const auto handle = create_result_type_Scalar_Tensor_typed_handle();
    return handle.call(scalar, tensor);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::ScalarType result_type_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar, const at::Tensor & tensor) {
    static const auto handle = create_result_type_Scalar_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, scalar, tensor);
}

// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<result_type_Scalar_Scalar::schema> create_result_type_Scalar_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(result_type_Scalar_Scalar::name, result_type_Scalar_Scalar::overload_name)
      .typed<result_type_Scalar_Scalar::schema>();
}

// Dispatches aten::result_type.Scalar_Scalar through the cached operator handle.
at::ScalarType result_type_Scalar_Scalar::call(const at::Scalar & scalar1, const at::Scalar & scalar2) {
    static const auto handle = create_result_type_Scalar_Scalar_typed_handle();
    return handle.call(scalar1, scalar2);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::ScalarType result_type_Scalar_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar1, const at::Scalar & scalar2) {
    static const auto handle = create_result_type_Scalar_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, scalar1, scalar2);
}

// aten::can_cast(ScalarType from_, ScalarType to) -> bool
// aten::can_cast(ScalarType from_, ScalarType to) -> bool
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<can_cast::schema> create_can_cast_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(can_cast::name, can_cast::overload_name)
      .typed<can_cast::schema>();
}

// Dispatches aten::can_cast through the cached operator handle.
bool can_cast::call(at::ScalarType from_, at::ScalarType to) {
    static const auto handle = create_can_cast_typed_handle();
    return handle.call(from_, to);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
bool can_cast::redispatch(c10::DispatchKeySet dispatchKeySet, at::ScalarType from_, at::ScalarType to) {
    static const auto handle = create_can_cast_typed_handle();
    return handle.redispatch(dispatchKeySet, from_, to);
}

// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<promote_types::schema> create_promote_types_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(promote_types::name, promote_types::overload_name)
      .typed<promote_types::schema>();
}

// Dispatches aten::promote_types through the cached operator handle.
at::ScalarType promote_types::call(at::ScalarType type1, at::ScalarType type2) {
    static const auto handle = create_promote_types_typed_handle();
    return handle.call(type1, type2);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::ScalarType promote_types::redispatch(c10::DispatchKeySet dispatchKeySet, at::ScalarType type1, at::ScalarType type2) {
    static const auto handle = create_promote_types_typed_handle();
    return handle.redispatch(dispatchKeySet, type1, type2);
}

// aten::_local_scalar_dense(Tensor self) -> Scalar
// aten::_local_scalar_dense(Tensor self) -> Scalar
// Looks up the operator schema in the dispatcher and returns a typed handle to it.
static C10_NOINLINE c10::TypedOperatorHandle<_local_scalar_dense::schema> create__local_scalar_dense_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_local_scalar_dense::name, _local_scalar_dense::overload_name)
      .typed<_local_scalar_dense::schema>();
}

// Dispatches aten::_local_scalar_dense through the cached operator handle.
at::Scalar _local_scalar_dense::call(const at::Tensor & self) {
    static const auto handle = create__local_scalar_dense_typed_handle();
    return handle.call(self);
}

// Redispatch variant: forwards an explicit DispatchKeySet to the dispatcher.
at::Scalar _local_scalar_dense::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static const auto handle = create__local_scalar_dense_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_lstm_mps::schema> create__lstm_mps_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_lstm_mps::name, _lstm_mps::overload_name)
      .typed<_lstm_mps::schema>();
}

// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps::call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__lstm_mps_typed_handle();
    return typed_handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__lstm_mps_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
static C10_NOINLINE c10::TypedOperatorHandle<lstm_mps_backward::schema> create_lstm_mps_backward_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lstm_mps_backward::name, lstm_mps_backward::overload_name)
      .typed<lstm_mps_backward::schema>();
}

// aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward::call(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_lstm_mps_backward_typed_handle();
    return typed_handle.call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_lstm_mps_backward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell::schema> create__thnn_fused_lstm_cell_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_lstm_cell::name, _thnn_fused_lstm_cell::overload_name)
      .typed<_thnn_fused_lstm_cell::schema>();
}

// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_fused_lstm_cell_typed_handle();
    return typed_handle.call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
}

// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_fused_lstm_cell_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias);
}

// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_backward_impl::schema> create__thnn_fused_lstm_cell_backward_impl_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_lstm_cell_backward_impl::name, _thnn_fused_lstm_cell_backward_impl::overload_name)
      .typed<_thnn_fused_lstm_cell_backward_impl::schema>();
}

// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl::call(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_fused_lstm_cell_backward_impl_typed_handle();
    return typed_handle.call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_fused_lstm_cell_backward_impl_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_backward::schema> create__thnn_fused_lstm_cell_backward_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_lstm_cell_backward::name, _thnn_fused_lstm_cell_backward::overload_name)
      .typed<_thnn_fused_lstm_cell_backward::schema>();
}

// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward::call(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_fused_lstm_cell_backward_typed_handle();
    return typed_handle.call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_fused_lstm_cell_backward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias);
}

// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_differentiable_lstm_cell_backward::schema> create__thnn_differentiable_lstm_cell_backward_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_differentiable_lstm_cell_backward::name, _thnn_differentiable_lstm_cell_backward::overload_name)
      .typed<_thnn_differentiable_lstm_cell_backward::schema>();
}

// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward::call(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_differentiable_lstm_cell_backward_typed_handle();
    return typed_handle.call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}

// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_differentiable_lstm_cell_backward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
}

// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell::schema> create__thnn_fused_gru_cell_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_gru_cell::name, _thnn_fused_gru_cell::overload_name)
      .typed<_thnn_fused_gru_cell::schema>();
}

// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_fused_gru_cell_typed_handle();
    return typed_handle.call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_fused_gru_cell_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell_backward::schema> create__thnn_fused_gru_cell_backward_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_gru_cell_backward::name, _thnn_fused_gru_cell_backward::overload_name)
      .typed<_thnn_fused_gru_cell_backward::schema>();
}

// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward::call(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_fused_gru_cell_backward_typed_handle();
    return typed_handle.call(grad_hy, workspace, has_bias);
}

// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_fused_gru_cell_backward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_hy, workspace, has_bias);
}

// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_differentiable_gru_cell_backward::schema> create__thnn_differentiable_gru_cell_backward_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_differentiable_gru_cell_backward::name, _thnn_differentiable_gru_cell_backward::overload_name)
      .typed<_thnn_differentiable_gru_cell_backward::schema>();
}

// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward::call(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__thnn_differentiable_gru_cell_backward_typed_handle();
    return typed_handle.call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create__thnn_differentiable_gru_cell_backward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
}

// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<lstm_input::schema> create_lstm_input_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lstm_input::name, lstm_input::overload_name)
      .typed<lstm_input::schema>();
}

// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input::call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_lstm_input_typed_handle();
    return typed_handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_lstm_input_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<lstm_data::schema> create_lstm_data_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lstm_data::name, lstm_data::overload_name)
      .typed<lstm_data::schema>();
}

// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_lstm_data_typed_handle();
    return typed_handle.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_lstm_data_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<gru_input::schema> create_gru_input_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gru_input::name, gru_input::overload_name)
      .typed<gru_input::schema>();
}

// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> gru_input::call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_gru_input_typed_handle();
    return typed_handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> gru_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_gru_input_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<gru_data::schema> create_gru_data_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(gru_data::name, gru_data::overload_name)
      .typed<gru_data::schema>();
}

// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> gru_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_gru_data_typed_handle();
    return typed_handle.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> gru_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_gru_data_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_tanh_input::schema> create_rnn_tanh_input_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rnn_tanh_input::name, rnn_tanh_input::overload_name)
      .typed<rnn_tanh_input::schema>();
}

// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input::call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_rnn_tanh_input_typed_handle();
    return typed_handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_rnn_tanh_input_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_tanh_data::schema> create_rnn_tanh_data_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rnn_tanh_data::name, rnn_tanh_data::overload_name)
      .typed<rnn_tanh_data::schema>();
}

// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_rnn_tanh_data_typed_handle();
    return typed_handle.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_rnn_tanh_data_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_relu_input::schema> create_rnn_relu_input_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rnn_relu_input::name, rnn_relu_input::overload_name)
      .typed<rnn_relu_input::schema>();
}

// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_input::call(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_rnn_relu_input_typed_handle();
    return typed_handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_rnn_relu_input_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
}

// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<rnn_relu_data::schema> create_rnn_relu_data_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(rnn_relu_data::name, rnn_relu_data::overload_name)
      .typed<rnn_relu_data::schema>();
}

// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_data::call(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_rnn_relu_data_typed_handle();
    return typed_handle.call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> rnn_relu_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_rnn_relu_data_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
}

// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<lstm_cell::schema> create_lstm_cell_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its compile-time schema.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lstm_cell::name, lstm_cell::overload_name)
      .typed<lstm_cell::schema>();
}

// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> lstm_cell::call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_lstm_cell_typed_handle();
    return typed_handle.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> lstm_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Re-enter dispatch using the caller-supplied dispatch key set.
    static auto typed_handle = create_lstm_cell_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gru_cell::schema> create_gru_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gru_cell::name, gru_cell::overload_name)
      .typed<gru_cell::schema>();
}

// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor gru_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_gru_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor gru_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_gru_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rnn_tanh_cell::schema> create_rnn_tanh_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(rnn_tanh_cell::name, rnn_tanh_cell::overload_name)
      .typed<rnn_tanh_cell::schema>();
}

// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_tanh_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_rnn_tanh_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_tanh_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_rnn_tanh_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<rnn_relu_cell::schema> create_rnn_relu_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(rnn_relu_cell::name, rnn_relu_cell::overload_name)
      .typed<rnn_relu_cell::schema>();
}

// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_relu_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_rnn_relu_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
at::Tensor rnn_relu_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_rnn_relu_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh);
}

// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<quantized_lstm_cell::schema> create_quantized_lstm_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(quantized_lstm_cell::name, quantized_lstm_cell::overload_name)
      .typed<quantized_lstm_cell::schema>();
}

// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell::call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_lstm_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_lstm_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_gru_cell::schema> create_quantized_gru_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(quantized_gru_cell::name, quantized_gru_cell::overload_name)
      .typed<quantized_gru_cell::schema>();
}

// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_gru_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_gru_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_gru_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_gru_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_rnn_relu_cell::schema> create_quantized_rnn_relu_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(quantized_rnn_relu_cell::name, quantized_rnn_relu_cell::overload_name)
      .typed<quantized_rnn_relu_cell::schema>();
}

// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_relu_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_rnn_relu_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_relu_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_rnn_relu_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantized_rnn_tanh_cell::schema> create_quantized_rnn_tanh_cell_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(quantized_rnn_tanh_cell::name, quantized_rnn_tanh_cell::overload_name)
      .typed<quantized_rnn_tanh_cell::schema>();
}

// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_tanh_cell::call(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_rnn_tanh_cell_typed_handle();
    return handle.call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
at::Tensor quantized_rnn_tanh_cell::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_quantized_rnn_tanh_cell_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
}

// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_pack_padded_sequence::schema> create__pack_padded_sequence_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_pack_padded_sequence::name, _pack_padded_sequence::overload_name)
      .typed<_pack_padded_sequence::schema>();
}

// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence::call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pack_padded_sequence_typed_handle();
    return handle.call(input, lengths, batch_first);
}

// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pack_padded_sequence_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, input, lengths, batch_first);
}

// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_pack_padded_sequence_backward::schema> create__pack_padded_sequence_backward_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_pack_padded_sequence_backward::name, _pack_padded_sequence_backward::overload_name)
      .typed<_pack_padded_sequence_backward::schema>();
}

// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
at::Tensor _pack_padded_sequence_backward::call(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pack_padded_sequence_backward_typed_handle();
    return handle.call(grad, input_size, batch_sizes, batch_first);
}

// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
at::Tensor _pack_padded_sequence_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pack_padded_sequence_backward_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, grad, input_size, batch_sizes, batch_first);
}

// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<_pad_packed_sequence::schema> create__pad_packed_sequence_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_pad_packed_sequence::name, _pad_packed_sequence::overload_name)
      .typed<_pad_packed_sequence::schema>();
}

// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence::call(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pad_packed_sequence_typed_handle();
    return handle.call(data, batch_sizes, batch_first, padding_value, total_length);
}

// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pad_packed_sequence_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, data, batch_sizes, batch_first, padding_value, total_length);
}

// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Storage::schema> create_set__source_Storage_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set__source_Storage::name, set__source_Storage::overload_name)
      .typed<set__source_Storage::schema>();
}

// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
at::Tensor & set__source_Storage::call(at::Tensor & self, at::Storage source) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Storage_typed_handle();
    return handle.call(self, source);
}

// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
at::Tensor & set__source_Storage::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Storage_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, source);
}

// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Storage_storage_offset::schema> create_set__source_Storage_storage_offset_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set__source_Storage_storage_offset::name, set__source_Storage_storage_offset::overload_name)
      .typed<set__source_Storage_storage_offset::schema>();
}

// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Storage_storage_offset::call(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Storage_storage_offset_typed_handle();
    return handle.call(self, source, storage_offset, size, stride);
}

// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Storage_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Storage_storage_offset_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
}

// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Tensor_storage_offset::schema> create_set__source_Tensor_storage_offset_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set__source_Tensor_storage_offset::name, set__source_Tensor_storage_offset::overload_name)
      .typed<set__source_Tensor_storage_offset::schema>();
}

// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Tensor_storage_offset::call(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Tensor_storage_offset_typed_handle();
    return handle.call(self, source, storage_offset, size, stride);
}

// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
at::Tensor & set__source_Tensor_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Tensor_storage_offset_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
}

// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set__source_Tensor::schema> create_set__source_Tensor_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set__source_Tensor::name, set__source_Tensor::overload_name)
      .typed<set__source_Tensor::schema>();
}

// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
at::Tensor & set__source_Tensor::call(at::Tensor & self, const at::Tensor & source) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Tensor_typed_handle();
    return handle.call(self, source);
}

// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
at::Tensor & set__source_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__source_Tensor_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, source);
}

// aten::set_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<set_::schema> create_set__typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(set_::name, set_::overload_name)
      .typed<set_::schema>();
}

// aten::set_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & set_::call(at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__typed_handle();
    return handle.call(self);
}

// aten::set_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & set_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set__typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self);
}

// aten::lift(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lift::schema> create_lift_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lift::name, lift::overload_name)
      .typed<lift::schema>();
}

// aten::lift(Tensor self) -> Tensor
at::Tensor lift::call(const at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_typed_handle();
    return handle.call(self);
}

// aten::lift(Tensor self) -> Tensor
at::Tensor lift::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self);
}

// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<lift_fresh::schema> create_lift_fresh_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lift_fresh::name, lift_fresh::overload_name)
      .typed<lift_fresh::schema>();
}

// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
at::Tensor lift_fresh::call(const at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_fresh_typed_handle();
    return handle.call(self);
}

// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
at::Tensor lift_fresh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_fresh_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self);
}

// aten::lift_fresh_copy(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lift_fresh_copy::schema> create_lift_fresh_copy_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lift_fresh_copy::name, lift_fresh_copy::overload_name)
      .typed<lift_fresh_copy::schema>();
}

// aten::lift_fresh_copy(Tensor self) -> Tensor
at::Tensor lift_fresh_copy::call(const at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_fresh_copy_typed_handle();
    return handle.call(self);
}

// aten::lift_fresh_copy(Tensor self) -> Tensor
at::Tensor lift_fresh_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_fresh_copy_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self);
}

// aten::is_set_to(Tensor self, Tensor tensor) -> bool
static C10_NOINLINE c10::TypedOperatorHandle<is_set_to::schema> create_is_set_to_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(is_set_to::name, is_set_to::overload_name)
      .typed<is_set_to::schema>();
}

// aten::is_set_to(Tensor self, Tensor tensor) -> bool
bool is_set_to::call(const at::Tensor & self, const at::Tensor & tensor) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_is_set_to_typed_handle();
    return handle.call(self, tensor);
}

// aten::is_set_to(Tensor self, Tensor tensor) -> bool
bool is_set_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_is_set_to_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, tensor);
}

// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill__Scalar::schema> create_masked_fill__Scalar_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_fill__Scalar::name, masked_fill__Scalar::overload_name)
      .typed<masked_fill__Scalar::schema>();
}

// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
at::Tensor & masked_fill__Scalar::call(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill__Scalar_typed_handle();
    return handle.call(self, mask, value);
}

// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
at::Tensor & masked_fill__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill__Scalar_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, mask, value);
}

// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Scalar::schema> create_masked_fill_Scalar_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_fill_Scalar::name, masked_fill_Scalar::overload_name)
      .typed<masked_fill_Scalar::schema>();
}

// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
at::Tensor masked_fill_Scalar::call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill_Scalar_typed_handle();
    return handle.call(self, mask, value);
}

// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
at::Tensor masked_fill_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill_Scalar_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, mask, value);
}

// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill__Tensor::schema> create_masked_fill__Tensor_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_fill__Tensor::name, masked_fill__Tensor::overload_name)
      .typed<masked_fill__Tensor::schema>();
}

// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
at::Tensor & masked_fill__Tensor::call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill__Tensor_typed_handle();
    return handle.call(self, mask, value);
}

// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
at::Tensor & masked_fill__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill__Tensor_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, mask, value);
}

// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Tensor::schema> create_masked_fill_Tensor_typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_fill_Tensor::name, masked_fill_Tensor::overload_name)
      .typed<masked_fill_Tensor::schema>();
}

// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
at::Tensor masked_fill_Tensor::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill_Tensor_typed_handle();
    return handle.call(self, mask, value);
}

// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
at::Tensor masked_fill_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill_Tensor_typed_handle();
    // Re-enter the dispatcher restricted to the caller-supplied key set.
    return handle.redispatch(dispatchKeySet, self, mask, value);
}

// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter_::schema> create_masked_scatter__typed_handle() {
  // Look up the registered schema and bind it to the statically-known signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_scatter_::name, masked_scatter_::overload_name)
      .typed<masked_scatter_::schema>();
}

// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
at::Tensor & masked_scatter_::call(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
    // Typed handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_scatter__typed_handle();
    return handle.call(self, mask, source);
}

// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
at::Tensor & masked_scatter_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
    
    static auto op = create_masked_scatter__typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, source);
}

// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter::schema> create_masked_scatter_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_scatter::name, masked_scatter::overload_name)
      .typed<masked_scatter::schema>();
}

// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
at::Tensor masked_scatter::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_masked_scatter_typed_handle();
  return handle.call(self, mask, source);
}

// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
at::Tensor masked_scatter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_masked_scatter_typed_handle();
  return handle.redispatch(dispatchKeySet, self, mask, source);
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter_backward::schema> create_masked_scatter_backward_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(masked_scatter_backward::name, masked_scatter_backward::overload_name)
      .typed<masked_scatter_backward::schema>();
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
at::Tensor masked_scatter_backward::call(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_masked_scatter_backward_typed_handle();
  return handle.call(grad_output, mask, sizes);
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
at::Tensor masked_scatter_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_masked_scatter_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_output, mask, sizes);
}

// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax::schema> create__masked_softmax_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_masked_softmax::name, _masked_softmax::overload_name)
      .typed<_masked_softmax::schema>();
}

// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
at::Tensor _masked_softmax::call(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create__masked_softmax_typed_handle();
  return handle.call(self, mask, dim, mask_type);
}

// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
at::Tensor _masked_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create__masked_softmax_typed_handle();
  return handle.redispatch(dispatchKeySet, self, mask, dim, mask_type);
}

// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax_backward::schema> create__masked_softmax_backward_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_masked_softmax_backward::name, _masked_softmax_backward::overload_name)
      .typed<_masked_softmax_backward::schema>();
}

// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
at::Tensor _masked_softmax_backward::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create__masked_softmax_backward_typed_handle();
  return handle.call(grad_output, output, mask, dim);
}

// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
at::Tensor _masked_softmax_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create__masked_softmax_backward_typed_handle();
  return handle.redispatch(dispatchKeySet, grad_output, output, mask, dim);
}

// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view::schema> create_view_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(view::name, view::overload_name)
      .typed<view::schema>();
}

// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor view::call(const at::Tensor & self, c10::SymIntArrayRef size) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_view_typed_handle();
  return handle.call(self, size);
}

// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
at::Tensor view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_view_typed_handle();
  return handle.redispatch(dispatchKeySet, self, size);
}

// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<view_dtype::schema> create_view_dtype_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(view_dtype::name, view_dtype::overload_name)
      .typed<view_dtype::schema>();
}

// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
at::Tensor view_dtype::call(const at::Tensor & self, at::ScalarType dtype) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_view_dtype_typed_handle();
  return handle.call(self, dtype);
}

// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
at::Tensor view_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_view_dtype_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dtype);
}

// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<put_::schema> create_put__typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(put_::name, put_::overload_name)
      .typed<put_::schema>();
}

// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
at::Tensor & put_::call(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_put__typed_handle();
  return handle.call(self, index, source, accumulate);
}

// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
at::Tensor & put_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_put__typed_handle();
  return handle.redispatch(dispatchKeySet, self, index, source, accumulate);
}

// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<put::schema> create_put_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(put::name, put::overload_name)
      .typed<put::schema>();
}

// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
at::Tensor put::call(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_put_typed_handle();
  return handle.call(self, index, source, accumulate);
}

// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
at::Tensor put::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_put_typed_handle();
  return handle.redispatch(dispatchKeySet, self, index, source, accumulate);
}

// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_add_out::schema> create_index_add_out_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_add_out::name, index_add_out::overload_name)
      .typed<index_add_out::schema>();
}

// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_add_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_add_out_typed_handle();
  return handle.call(self, dim, index, source, alpha, out);
}

// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_add_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_add_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, alpha, out);
}

// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_add_::schema> create_index_add__typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_add_::name, index_add_::overload_name)
      .typed<index_add_::schema>();
}

// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & index_add_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_add__typed_handle();
  return handle.call(self, dim, index, source, alpha);
}

// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
at::Tensor & index_add_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_add__typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, alpha);
}

// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_add::schema> create_index_add_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_add::name, index_add::overload_name)
      .typed<index_add::schema>();
}

// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_add_typed_handle();
  return handle.call(self, dim, index, source, alpha);
}

// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_add_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, alpha);
}

// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_add_dimname::schema> create_index_add_dimname_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_add_dimname::name, index_add_dimname::overload_name)
      .typed<index_add_dimname::schema>();
}

// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_add_dimname_typed_handle();
  return handle.call(self, dim, index, source, alpha);
}

// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
at::Tensor index_add_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_add_dimname_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, alpha);
}

// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_reduce_out::schema> create_index_reduce_out_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_reduce_out::name, index_reduce_out::overload_name)
      .typed<index_reduce_out::schema>();
}

// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_reduce_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_reduce_out_typed_handle();
  return handle.call(self, dim, index, source, reduce, include_self, out);
}

// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_reduce_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self, out);
}

// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_reduce_::schema> create_index_reduce__typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_reduce_::name, index_reduce_::overload_name)
      .typed<index_reduce_::schema>();
}

// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & index_reduce_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_reduce__typed_handle();
  return handle.call(self, dim, index, source, reduce, include_self);
}

// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & index_reduce_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_reduce__typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self);
}

// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_reduce::schema> create_index_reduce_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_reduce::name, index_reduce::overload_name)
      .typed<index_reduce::schema>();
}

// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
at::Tensor index_reduce::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_reduce_typed_handle();
  return handle.call(self, dim, index, source, reduce, include_self);
}

// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
at::Tensor index_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_reduce_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self);
}

// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__int_Scalar::schema> create_index_fill__int_Scalar_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill__int_Scalar::name, index_fill__int_Scalar::overload_name)
      .typed<index_fill__int_Scalar::schema>();
}

// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__int_Scalar::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill__int_Scalar_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__int_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill__int_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Scalar::schema> create_index_fill_int_Scalar_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill_int_Scalar::name, index_fill_int_Scalar::overload_name)
      .typed<index_fill_int_Scalar::schema>();
}

// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_int_Scalar::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill_int_Scalar_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_int_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill_int_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__int_Tensor::schema> create_index_fill__int_Tensor_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill__int_Tensor::name, index_fill__int_Tensor::overload_name)
      .typed<index_fill__int_Tensor::schema>();
}

// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__int_Tensor::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill__int_Tensor_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__int_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill__int_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Tensor::schema> create_index_fill_int_Tensor_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill_int_Tensor::name, index_fill_int_Tensor::overload_name)
      .typed<index_fill_int_Tensor::schema>();
}

// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_int_Tensor::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill_int_Tensor_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_int_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill_int_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__Dimname_Scalar::schema> create_index_fill__Dimname_Scalar_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill__Dimname_Scalar::name, index_fill__Dimname_Scalar::overload_name)
      .typed<index_fill__Dimname_Scalar::schema>();
}

// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Scalar::call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill__Dimname_Scalar_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill__Dimname_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<index_fill__Dimname_Tensor::schema> create_index_fill__Dimname_Tensor_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill__Dimname_Tensor::name, index_fill__Dimname_Tensor::overload_name)
      .typed<index_fill__Dimname_Tensor::schema>();
}

// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Tensor::call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill__Dimname_Tensor_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
at::Tensor & index_fill__Dimname_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill__Dimname_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_Dimname_Scalar::schema> create_index_fill_Dimname_Scalar_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill_Dimname_Scalar::name, index_fill_Dimname_Scalar::overload_name)
      .typed<index_fill_Dimname_Scalar::schema>();
}

// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_Dimname_Scalar::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill_Dimname_Scalar_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor index_fill_Dimname_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill_Dimname_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_Dimname_Tensor::schema> create_index_fill_Dimname_Tensor_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(index_fill_Dimname_Tensor::name, index_fill_Dimname_Tensor::overload_name)
      .typed<index_fill_Dimname_Tensor::schema>();
}

// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_Dimname_Tensor::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_index_fill_Dimname_Tensor_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
at::Tensor index_fill_Dimname_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_index_fill_Dimname_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_src::schema> create_scatter_src_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(scatter_src::name, scatter_src::overload_name)
      .typed<scatter_src::schema>();
}

// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_src::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_scatter_src_typed_handle();
  return handle.call(self, dim, index, src);
}

// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_src::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_scatter_src_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__src::schema> create_scatter__src_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(scatter__src::name, scatter__src::overload_name)
      .typed<scatter__src::schema>();
}

// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter__src::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_scatter__src_typed_handle();
  return handle.call(self, dim, index, src);
}

// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter__src::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_scatter__src_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_src_out::schema> create_scatter_src_out_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(scatter_src_out::name, scatter_src_out::overload_name)
      .typed<scatter_src_out::schema>();
}

// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_src_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_scatter_src_out_typed_handle();
  return handle.call(self, dim, index, src, out);
}

// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_src_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_scatter_src_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, src, out);
}

// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value::schema> create_scatter_value_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(scatter_value::name, scatter_value::overload_name)
      .typed<scatter_value::schema>();
}

// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_value::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_scatter_value_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_scatter_value_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__value::schema> create_scatter__value_typed_handle() {
  // Look up the operator by name/overload and return it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(scatter__value::name, scatter__value::overload_name)
      .typed<scatter__value::schema>();
}

// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & scatter__value::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Dispatch via the lazily-initialized typed operator handle.
  static const auto handle = create_scatter__value_typed_handle();
  return handle.call(self, dim, index, value);
}

// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
at::Tensor & scatter__value::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  // Re-enter dispatch using the caller-supplied dispatch key set.
  static const auto handle = create_scatter__value_typed_handle();
  return handle.redispatch(dispatchKeySet, self, dim, index, value);
}

// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value_out::schema> create_scatter_value_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value_out::name, scatter_value_out::overload_name)
      .typed<scatter_value_out::schema>();
}

// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_scatter_value_out_typed_handle();
    return op.call(self, dim, index, value, out);
}

// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_scatter_value_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the scatter.reduce
// overload family (functional, in-place, out). Each op gets a C10_NOINLINE
// creator that resolves its schema in the global c10::Dispatcher, a call()
// entry that caches the typed handle via a thread-safe function-local static,
// and a redispatch() that re-enters dispatch with an explicit DispatchKeySet.
// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce::schema> create_scatter_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce::name, scatter_reduce::overload_name)
      .typed<scatter_reduce::schema>();
}

// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
at::Tensor scatter_reduce::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter_reduce_typed_handle();
    return op.call(self, dim, index, src, reduce);
}

// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
at::Tensor scatter_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce);
}

// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__reduce::schema> create_scatter__reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter__reduce::name, scatter__reduce::overload_name)
      .typed<scatter__reduce::schema>();
}

// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__reduce::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter__reduce_typed_handle();
    return op.call(self, dim, index, src, reduce);
}

// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__reduce::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
    
    static auto op = create_scatter__reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce);
}

// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce_out::schema> create_scatter_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce_out::name, scatter_reduce_out::overload_name)
      .typed<scatter_reduce_out::schema>();
}

// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_reduce_out_typed_handle();
    return op.call(self, dim, index, src, reduce, out);
}

// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the
// scatter.value_reduce overload family (functional, in-place, out). Each op
// gets a C10_NOINLINE creator that resolves its schema in the global
// c10::Dispatcher, a call() entry that caches the typed handle via a
// thread-safe function-local static, and a redispatch() that re-enters
// dispatch with an explicit DispatchKeySet.
// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value_reduce::schema> create_scatter_value_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value_reduce::name, scatter_value_reduce::overload_name)
      .typed<scatter_value_reduce::schema>();
}

// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
at::Tensor scatter_value_reduce::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter_value_reduce_typed_handle();
    return op.call(self, dim, index, value, reduce);
}

// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
at::Tensor scatter_value_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter_value_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, reduce);
}

// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter__value_reduce::schema> create_scatter__value_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter__value_reduce::name, scatter__value_reduce::overload_name)
      .typed<scatter__value_reduce::schema>();
}

// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__value_reduce::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter__value_reduce_typed_handle();
    return op.call(self, dim, index, value, reduce);
}

// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
at::Tensor & scatter__value_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
    
    static auto op = create_scatter__value_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, reduce);
}

// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_value_reduce_out::schema> create_scatter_value_reduce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_value_reduce_out::name, scatter_value_reduce_out::overload_name)
      .typed<scatter_value_reduce_out::schema>();
}

// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_reduce_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_value_reduce_out_typed_handle();
    return op.call(self, dim, index, value, reduce, out);
}

// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_value_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
    
    static auto op = create_scatter_value_reduce_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value, reduce, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the named-dimension
// (at::Dimname) scatter overloads. Each op gets a C10_NOINLINE creator that
// resolves its schema in the global c10::Dispatcher, a call() entry that
// caches the typed handle via a thread-safe function-local static, and a
// redispatch() that re-enters dispatch with an explicit DispatchKeySet.
// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_dimname_src::schema> create_scatter_dimname_src_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_dimname_src::name, scatter_dimname_src::overload_name)
      .typed<scatter_dimname_src::schema>();
}

// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_dimname_src::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_dimname_src_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_dimname_src::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_dimname_src_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_dimname_value::schema> create_scatter_dimname_value_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_dimname_value::name, scatter_dimname_value::overload_name)
      .typed<scatter_dimname_value::schema>();
}

// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_dimname_value::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    
    static auto op = create_scatter_dimname_value_typed_handle();
    return op.call(self, dim, index, value);
}

// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
at::Tensor scatter_dimname_value::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
    
    static auto op = create_scatter_dimname_value_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, value);
}

// NOTE(review): torchgen-generated dispatcher stubs for the scatter_add
// overload family (functional, in-place, out, dimname). Each op gets a
// C10_NOINLINE creator that resolves its schema in the global c10::Dispatcher,
// a call() entry that caches the typed handle via a thread-safe function-local
// static, and a redispatch() that re-enters dispatch with an explicit
// DispatchKeySet.
// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add::schema> create_scatter_add_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add::name, scatter_add::overload_name)
      .typed<scatter_add::schema>();
}

// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_add_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_add_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add_::schema> create_scatter_add__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add_::name, scatter_add_::overload_name)
      .typed<scatter_add_::schema>();
}

// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter_add_::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_add__typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
at::Tensor & scatter_add_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_add__typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add_out::schema> create_scatter_add_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add_out::name, scatter_add_out::overload_name)
      .typed<scatter_add_out::schema>();
}

// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_add_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
    
    static auto op = create_scatter_add_out_typed_handle();
    return op.call(self, dim, index, src, out);
}

// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_add_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
    
    static auto op = create_scatter_add_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, out);
}

// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_add_dimname::schema> create_scatter_add_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_add_dimname::name, scatter_add_dimname::overload_name)
      .typed<scatter_add_dimname::schema>();
}

// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_add_dimname_typed_handle();
    return op.call(self, dim, index, src);
}

// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
at::Tensor scatter_add_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
    
    static auto op = create_scatter_add_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src);
}

// NOTE(review): torchgen-generated dispatcher stubs for the scatter_reduce.two
// overload family (functional, in-place, out). Each op gets a C10_NOINLINE
// creator that resolves its schema in the global c10::Dispatcher, a call()
// entry that caches the typed handle via a thread-safe function-local static,
// and a redispatch() that re-enters dispatch with an explicit DispatchKeySet.
// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce_two::schema> create_scatter_reduce_two_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce_two::name, scatter_reduce_two::overload_name)
      .typed<scatter_reduce_two::schema>();
}

// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
at::Tensor scatter_reduce_two::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    
    static auto op = create_scatter_reduce_two_typed_handle();
    return op.call(self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
at::Tensor scatter_reduce_two::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    
    static auto op = create_scatter_reduce_two_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce__two::schema> create_scatter_reduce__two_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce__two::name, scatter_reduce__two::overload_name)
      .typed<scatter_reduce__two::schema>();
}

// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & scatter_reduce__two::call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    
    static auto op = create_scatter_reduce__two_typed_handle();
    return op.call(self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
at::Tensor & scatter_reduce__two::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
    
    static auto op = create_scatter_reduce__two_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self);
}

// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scatter_reduce_two_out::schema> create_scatter_reduce_two_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scatter_reduce_two_out::name, scatter_reduce_two_out::overload_name)
      .typed<scatter_reduce_two_out::schema>();
}

// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_two_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
    
    static auto op = create_scatter_reduce_two_out_typed_handle();
    return op.call(self, dim, index, src, reduce, include_self, out);
}

// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scatter_reduce_two_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
    
    static auto op = create_scatter_reduce_two_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the in-place eq_
// overloads (Scalar, Tensor). Each op gets a C10_NOINLINE creator that
// resolves its schema in the global c10::Dispatcher, a call() entry that
// caches the typed handle via a thread-safe function-local static, and a
// redispatch() that re-enters dispatch with an explicit DispatchKeySet.
// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq__Scalar::schema> create_eq__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq__Scalar::name, eq__Scalar::overload_name)
      .typed<eq__Scalar::schema>();
}

// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & eq__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_eq__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & eq__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_eq__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq__Tensor::schema> create_eq__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq__Tensor::name, eq__Tensor::overload_name)
      .typed<eq__Tensor::schema>();
}

// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & eq__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_eq__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & eq__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_eq__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): torchgen-generated dispatcher stubs for the bitwise_and out
// variants (Tensor_out, Scalar_out). Each op gets a C10_NOINLINE creator that
// resolves its schema in the global c10::Dispatcher, a call() entry that
// caches the typed handle via a thread-safe function-local static, and a
// redispatch() that re-enters dispatch with an explicit DispatchKeySet.
// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Tensor_out::schema> create_bitwise_and_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and_Tensor_out::name, bitwise_and_Tensor_out::overload_name)
      .typed<bitwise_and_Tensor_out::schema>();
}

// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_and_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_bitwise_and_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_and_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_bitwise_and_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar_out::schema> create_bitwise_and_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and_Scalar_out::name, bitwise_and_Scalar_out::overload_name)
      .typed<bitwise_and_Scalar_out::schema>();
}

// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_and_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_bitwise_and_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_and_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_bitwise_and_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the functional
// bitwise_and overloads (Scalar, Scalar_Tensor, Tensor). Each op gets a
// C10_NOINLINE creator that resolves its schema in the global c10::Dispatcher,
// a call() entry that caches the typed handle via a thread-safe function-local
// static, and a redispatch() that re-enters dispatch with an explicit
// DispatchKeySet.
// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar::schema> create_bitwise_and_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and_Scalar::name, bitwise_and_Scalar::overload_name)
      .typed<bitwise_and_Scalar::schema>();
}

// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_and_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_and_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_and_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_and_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// Scalar on the left-hand side, Tensor on the right.
// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar_Tensor::schema> create_bitwise_and_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and_Scalar_Tensor::name, bitwise_and_Scalar_Tensor::overload_name)
      .typed<bitwise_and_Scalar_Tensor::schema>();
}

// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_and_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_and_Scalar_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_and_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_and_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Tensor::schema> create_bitwise_and_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and_Tensor::name, bitwise_and_Tensor::overload_name)
      .typed<bitwise_and_Tensor::schema>();
}

// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_and_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_and_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_and_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_and_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): torchgen-generated dispatcher stubs for the in-place
// bitwise_and_ overloads (Scalar, Tensor); these mutate and return `self`
// (Tensor(a!)). Each op gets a C10_NOINLINE creator that resolves its schema
// in the global c10::Dispatcher, a call() entry that caches the typed handle
// via a thread-safe function-local static, and a redispatch() that re-enters
// dispatch with an explicit DispatchKeySet.
// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and__Scalar::schema> create_bitwise_and__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and__Scalar::name, bitwise_and__Scalar::overload_name)
      .typed<bitwise_and__Scalar::schema>();
}

// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_and__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_and__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_and__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_and__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and__Tensor::schema> create_bitwise_and__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_and__Tensor::name, bitwise_and__Tensor::overload_name)
      .typed<bitwise_and__Tensor::schema>();
}

// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_and__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_and__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_and__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_and__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): torchgen-generated dispatcher stubs for the __and__ operator
// overloads (Scalar, Tensor). Each op gets a C10_NOINLINE creator that
// resolves its schema in the global c10::Dispatcher, a call() entry that
// caches the typed handle via a thread-safe function-local static, and a
// redispatch() that re-enters dispatch with an explicit DispatchKeySet.
// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__and___Scalar::schema> create___and___Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__and___Scalar::name, __and___Scalar::overload_name)
      .typed<__and___Scalar::schema>();
}

// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __and___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___and___Scalar_typed_handle();
    return op.call(self, other);
}

// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __and___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___and___Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__and___Tensor::schema> create___and___Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__and___Tensor::name, __and___Tensor::overload_name)
      .typed<__and___Tensor::schema>();
}

// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __and___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___and___Tensor_typed_handle();
    return op.call(self, other);
}

// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __and___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___and___Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): torchgen-generated dispatcher stubs for the in-place __iand__
// operator overloads (Scalar, Tensor); these mutate and return `self`
// (Tensor(a!)). Each op gets a C10_NOINLINE creator that resolves its schema
// in the global c10::Dispatcher, a call() entry that caches the typed handle
// via a thread-safe function-local static, and a redispatch() that re-enters
// dispatch with an explicit DispatchKeySet.
// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__iand___Scalar::schema> create___iand___Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__iand___Scalar::name, __iand___Scalar::overload_name)
      .typed<__iand___Scalar::schema>();
}

// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __iand___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___iand___Scalar_typed_handle();
    return op.call(self, other);
}

// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __iand___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___iand___Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__iand___Tensor::schema> create___iand___Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__iand___Tensor::name, __iand___Tensor::overload_name)
      .typed<__iand___Tensor::schema>();
}

// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __iand___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___iand___Tensor_typed_handle();
    return op.call(self, other);
}

// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __iand___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___iand___Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Tensor_out::schema> create_bitwise_or_Tensor_out_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or_Tensor_out::name, bitwise_or_Tensor_out::overload_name)
      .typed<bitwise_or_Tensor_out::schema>();
}

// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_or_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_or_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar_out::schema> create_bitwise_or_Scalar_out_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or_Scalar_out::name, bitwise_or_Scalar_out::overload_name)
      .typed<bitwise_or_Scalar_out::schema>();
}

// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create_bitwise_or_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_or_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create_bitwise_or_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar::schema> create_bitwise_or_Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or_Scalar::name, bitwise_or_Scalar::overload_name)
      .typed<bitwise_or_Scalar::schema>();
}

// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_or_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_or_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_or_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_or_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar_Tensor::schema> create_bitwise_or_Scalar_Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or_Scalar_Tensor::name, bitwise_or_Scalar_Tensor::overload_name)
      .typed<bitwise_or_Scalar_Tensor::schema>();
}

// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_or_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    static auto handle = create_bitwise_or_Scalar_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_or_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    static auto handle = create_bitwise_or_Scalar_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Tensor::schema> create_bitwise_or_Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or_Tensor::name, bitwise_or_Tensor::overload_name)
      .typed<bitwise_or_Tensor::schema>();
}

// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_or_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_or_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_or_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_or_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or__Scalar::schema> create_bitwise_or__Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or__Scalar::name, bitwise_or__Scalar::overload_name)
      .typed<bitwise_or__Scalar::schema>();
}

// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_or__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_or__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_or__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_or__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or__Tensor::schema> create_bitwise_or__Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or__Tensor::name, bitwise_or__Tensor::overload_name)
      .typed<bitwise_or__Tensor::schema>();
}

// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_or__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_or__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_or__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_or__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__or___Scalar::schema> create___or___Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__or___Scalar::name, __or___Scalar::overload_name)
      .typed<__or___Scalar::schema>();
}

// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __or___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___or___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __or___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___or___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__or___Tensor::schema> create___or___Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__or___Tensor::name, __or___Tensor::overload_name)
      .typed<__or___Tensor::schema>();
}

// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __or___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___or___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __or___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___or___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ior___Scalar::schema> create___ior___Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__ior___Scalar::name, __ior___Scalar::overload_name)
      .typed<__ior___Scalar::schema>();
}

// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ior___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___ior___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ior___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___ior___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ior___Tensor::schema> create___ior___Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__ior___Tensor::name, __ior___Tensor::overload_name)
      .typed<__ior___Tensor::schema>();
}

// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ior___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___ior___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ior___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___ior___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Tensor_out::schema> create_bitwise_xor_Tensor_out_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor_Tensor_out::name, bitwise_xor_Tensor_out::overload_name)
      .typed<bitwise_xor_Tensor_out::schema>();
}

// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_xor_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_xor_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_xor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_xor_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar_out::schema> create_bitwise_xor_Scalar_out_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor_Scalar_out::name, bitwise_xor_Scalar_out::overload_name)
      .typed<bitwise_xor_Scalar_out::schema>();
}

// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_xor_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create_bitwise_xor_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_xor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create_bitwise_xor_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar::schema> create_bitwise_xor_Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor_Scalar::name, bitwise_xor_Scalar::overload_name)
      .typed<bitwise_xor_Scalar::schema>();
}

// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_xor_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_xor_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_xor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_xor_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar_Tensor::schema> create_bitwise_xor_Scalar_Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor_Scalar_Tensor::name, bitwise_xor_Scalar_Tensor::overload_name)
      .typed<bitwise_xor_Scalar_Tensor::schema>();
}

// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_xor_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    static auto handle = create_bitwise_xor_Scalar_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_xor_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    static auto handle = create_bitwise_xor_Scalar_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Tensor::schema> create_bitwise_xor_Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor_Tensor::name, bitwise_xor_Tensor::overload_name)
      .typed<bitwise_xor_Tensor::schema>();
}

// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_xor_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_xor_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_xor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_xor_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor__Scalar::schema> create_bitwise_xor__Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor__Scalar::name, bitwise_xor__Scalar::overload_name)
      .typed<bitwise_xor__Scalar::schema>();
}

// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_xor__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_xor__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_xor__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_xor__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor__Tensor::schema> create_bitwise_xor__Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor__Tensor::name, bitwise_xor__Tensor::overload_name)
      .typed<bitwise_xor__Tensor::schema>();
}

// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_xor__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_xor__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_xor__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_xor__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__xor___Scalar::schema> create___xor___Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__xor___Scalar::name, __xor___Scalar::overload_name)
      .typed<__xor___Scalar::schema>();
}

// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __xor___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___xor___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __xor___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___xor___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__xor___Tensor::schema> create___xor___Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__xor___Tensor::name, __xor___Tensor::overload_name)
      .typed<__xor___Tensor::schema>();
}

// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __xor___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___xor___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __xor___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___xor___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ixor___Scalar::schema> create___ixor___Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__ixor___Scalar::name, __ixor___Scalar::overload_name)
      .typed<__ixor___Scalar::schema>();
}

// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ixor___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___ixor___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ixor___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___ixor___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ixor___Tensor::schema> create___ixor___Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__ixor___Tensor::name, __ixor___Tensor::overload_name)
      .typed<__ixor___Tensor::schema>();
}

// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ixor___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___ixor___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ixor___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___ixor___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Scalar::schema> create___lshift___Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__lshift___Scalar::name, __lshift___Scalar::overload_name)
      .typed<__lshift___Scalar::schema>();
}

// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __lshift___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___lshift___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor __lshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___lshift___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Tensor::schema> create___lshift___Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__lshift___Tensor::name, __lshift___Tensor::overload_name)
      .typed<__lshift___Tensor::schema>();
}

// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __lshift___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___lshift___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor __lshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___lshift___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ilshift___Scalar::schema> create___ilshift___Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__ilshift___Scalar::name, __ilshift___Scalar::overload_name)
      .typed<__ilshift___Scalar::schema>();
}

// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ilshift___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___ilshift___Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & __ilshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create___ilshift___Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<__ilshift___Tensor::schema> create___ilshift___Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__ilshift___Tensor::name, __ilshift___Tensor::overload_name)
      .typed<__ilshift___Tensor::schema>();
}

// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ilshift___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___ilshift___Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & __ilshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create___ilshift___Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor::schema> create_bitwise_left_shift_Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift_Tensor::name, bitwise_left_shift_Tensor::overload_name)
      .typed<bitwise_left_shift_Tensor::schema>();
}

// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_left_shift_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_left_shift_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor bitwise_left_shift_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_left_shift_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift__Tensor::schema> create_bitwise_left_shift__Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift__Tensor::name, bitwise_left_shift__Tensor::overload_name)
      .typed<bitwise_left_shift__Tensor::schema>();
}

// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_left_shift__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_left_shift__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & bitwise_left_shift__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_bitwise_left_shift__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor_out::schema> create_bitwise_left_shift_Tensor_out_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift_Tensor_out::name, bitwise_left_shift_Tensor_out::overload_name)
      .typed<bitwise_left_shift_Tensor_out::schema>();
}

// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_left_shift_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_left_shift_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_left_shift_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_left_shift_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor_Scalar::schema> create_bitwise_left_shift_Tensor_Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift_Tensor_Scalar::name, bitwise_left_shift_Tensor_Scalar::overload_name)
      .typed<bitwise_left_shift_Tensor_Scalar::schema>();
}

// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_left_shift_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_left_shift_Tensor_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor bitwise_left_shift_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_left_shift_Tensor_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift__Tensor_Scalar::schema> create_bitwise_left_shift__Tensor_Scalar_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift__Tensor_Scalar::name, bitwise_left_shift__Tensor_Scalar::overload_name)
      .typed<bitwise_left_shift__Tensor_Scalar::schema>();
}

// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_left_shift__Tensor_Scalar::call(at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_left_shift__Tensor_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & bitwise_left_shift__Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    static auto handle = create_bitwise_left_shift__Tensor_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Tensor_Scalar_out::schema> create_bitwise_left_shift_Tensor_Scalar_out_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift_Tensor_Scalar_out::name, bitwise_left_shift_Tensor_Scalar_out::overload_name)
      .typed<bitwise_left_shift_Tensor_Scalar_out::schema>();
}

// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_left_shift_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create_bitwise_left_shift_Tensor_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bitwise_left_shift_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create_bitwise_left_shift_Tensor_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Scalar_Tensor::schema> create_bitwise_left_shift_Scalar_Tensor_typed_handle() {
  // Look the schema up by (name, overload) and bind it to the typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift_Scalar_Tensor::name, bitwise_left_shift_Scalar_Tensor::overload_name)
      .typed<bitwise_left_shift_Scalar_Tensor::schema>();
}

// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_left_shift_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    static auto handle = create_bitwise_left_shift_Scalar_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor bitwise_left_shift_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    static auto handle = create_bitwise_left_shift_Scalar_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Scalar::schema> create___rshift___Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__rshift___Scalar::name, __rshift___Scalar::overload_name)
      .typed<__rshift___Scalar::schema>();
}

// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor __rshift___Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___rshift___Scalar_typed_handle();
    return op.call(self, other);
}

// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor __rshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___rshift___Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Tensor::schema> create___rshift___Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__rshift___Tensor::name, __rshift___Tensor::overload_name)
      .typed<__rshift___Tensor::schema>();
}

// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor __rshift___Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___rshift___Tensor_typed_handle();
    return op.call(self, other);
}

// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor __rshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___rshift___Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<__irshift___Scalar::schema> create___irshift___Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__irshift___Scalar::name, __irshift___Scalar::overload_name)
      .typed<__irshift___Scalar::schema>();
}

// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & __irshift___Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___irshift___Scalar_typed_handle();
    return op.call(self, other);
}

// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & __irshift___Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create___irshift___Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<__irshift___Tensor::schema> create___irshift___Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(__irshift___Tensor::name, __irshift___Tensor::overload_name)
      .typed<__irshift___Tensor::schema>();
}

// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & __irshift___Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___irshift___Tensor_typed_handle();
    return op.call(self, other);
}

// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & __irshift___Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create___irshift___Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor::schema> create_bitwise_right_shift_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Tensor::name, bitwise_right_shift_Tensor::overload_name)
      .typed<bitwise_right_shift_Tensor::schema>();
}

// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor bitwise_right_shift_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_right_shift_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor bitwise_right_shift_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_right_shift_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift__Tensor::schema> create_bitwise_right_shift__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift__Tensor::name, bitwise_right_shift__Tensor::overload_name)
      .typed<bitwise_right_shift__Tensor::schema>();
}

// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & bitwise_right_shift__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_right_shift__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & bitwise_right_shift__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_right_shift__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor_out::schema> create_bitwise_right_shift_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Tensor_out::name, bitwise_right_shift_Tensor_out::overload_name)
      .typed<bitwise_right_shift_Tensor_out::schema>();
}

// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & bitwise_right_shift_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_bitwise_right_shift_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & bitwise_right_shift_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_bitwise_right_shift_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor_Scalar::schema> create_bitwise_right_shift_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Tensor_Scalar::name, bitwise_right_shift_Tensor_Scalar::overload_name)
      .typed<bitwise_right_shift_Tensor_Scalar::schema>();
}

// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor bitwise_right_shift_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_right_shift_Tensor_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor bitwise_right_shift_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_right_shift_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift__Tensor_Scalar::schema> create_bitwise_right_shift__Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift__Tensor_Scalar::name, bitwise_right_shift__Tensor_Scalar::overload_name)
      .typed<bitwise_right_shift__Tensor_Scalar::schema>();
}

// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & bitwise_right_shift__Tensor_Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_right_shift__Tensor_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & bitwise_right_shift__Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_bitwise_right_shift__Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Tensor_Scalar_out::schema> create_bitwise_right_shift_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Tensor_Scalar_out::name, bitwise_right_shift_Tensor_Scalar_out::overload_name)
      .typed<bitwise_right_shift_Tensor_Scalar_out::schema>();
}

// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & bitwise_right_shift_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_bitwise_right_shift_Tensor_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & bitwise_right_shift_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_bitwise_right_shift_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Scalar_Tensor::schema> create_bitwise_right_shift_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bitwise_right_shift_Scalar_Tensor::name, bitwise_right_shift_Scalar_Tensor::overload_name)
      .typed<bitwise_right_shift_Scalar_Tensor::schema>();
}

// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor bitwise_right_shift_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_right_shift_Scalar_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor bitwise_right_shift_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_bitwise_right_shift_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<tril_::schema> create_tril__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tril_::name, tril_::overload_name)
      .typed<tril_::schema>();
}

// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & tril_::call(at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_tril__typed_handle();
    return op.call(self, diagonal);
}

// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & tril_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_tril__typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<triu_::schema> create_triu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_::name, triu_::overload_name)
      .typed<triu_::schema>();
}

// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & triu_::call(at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu__typed_handle();
    return op.call(self, diagonal);
}

// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & triu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu__typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<digamma_::schema> create_digamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(digamma_::name, digamma_::overload_name)
      .typed<digamma_::schema>();
}

// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & digamma_::call(at::Tensor & self) {
    
    static auto op = create_digamma__typed_handle();
    return op.call(self);
}

// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & digamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_digamma__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<lerp__Scalar::schema> create_lerp__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp__Scalar::name, lerp__Scalar::overload_name)
      .typed<lerp__Scalar::schema>();
}

// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & lerp__Scalar::call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
    
    static auto op = create_lerp__Scalar_typed_handle();
    return op.call(self, end, weight);
}

// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & lerp__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
    
    static auto op = create_lerp__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight);
}

// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<lerp__Tensor::schema> create_lerp__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp__Tensor::name, lerp__Tensor::overload_name)
      .typed<lerp__Tensor::schema>();
}

// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & lerp__Tensor::call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
    
    static auto op = create_lerp__Tensor_typed_handle();
    return op.call(self, end, weight);
}

// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & lerp__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
    
    static auto op = create_lerp__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight);
}

// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<addbmm_::schema> create_addbmm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addbmm_::name, addbmm_::overload_name)
      .typed<addbmm_::schema>();
}

// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & addbmm_::call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm__typed_handle();
    return op.call(self, batch1, batch2, beta, alpha);
}

// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & addbmm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm__typed_handle();
    return op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<addbmm_out::schema> create_addbmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addbmm_out::name, addbmm_out::overload_name)
      .typed<addbmm_out::schema>();
}

// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & addbmm_out::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addbmm_out_typed_handle();
    return op.call(self, batch1, batch2, beta, alpha, out);
}

// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & addbmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_addbmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out);
}

// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<addbmm::schema> create_addbmm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addbmm::name, addbmm::overload_name)
      .typed<addbmm::schema>();
}

// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor addbmm::call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm_typed_handle();
    return op.call(self, batch1, batch2, beta, alpha);
}

// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor addbmm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
    
    static auto op = create_addbmm_typed_handle();
    return op.redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
}

// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<random__from::schema> create_random__from_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random__from::name, random__from::overload_name)
      .typed<random__from::schema>();
}

// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & random__from::call(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
    
    static auto op = create_random__from_typed_handle();
    return op.call(self, from, to, generator);
}

// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & random__from::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
    
    static auto op = create_random__from_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator);
}

// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<random__to::schema> create_random__to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random__to::name, random__to::overload_name)
      .typed<random__to::schema>();
}

// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & random__to::call(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
    
    static auto op = create_random__to_typed_handle();
    return op.call(self, to, generator);
}

// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & random__to::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
    
    static auto op = create_random__to_typed_handle();
    return op.redispatch(dispatchKeySet, self, to, generator);
}

// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<random_::schema> create_random__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_::name, random_::overload_name)
      .typed<random_::schema>();
}

// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & random_::call(at::Tensor & self, ::std::optional<at::Generator> generator) {
    
    static auto op = create_random__typed_handle();
    return op.call(self, generator);
}

// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & random_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<at::Generator> generator) {
    
    static auto op = create_random__typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}

// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<uniform_::schema> create_uniform__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(uniform_::name, uniform_::overload_name)
      .typed<uniform_::schema>();
}

// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & uniform_::call(at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    
    static auto op = create_uniform__typed_handle();
    return op.call(self, from, to, generator);
}

// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & uniform_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    
    static auto op = create_uniform__typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator);
}

// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<cauchy_::schema> create_cauchy__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cauchy_::name, cauchy_::overload_name)
      .typed<cauchy_::schema>();
}

// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & cauchy_::call(at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
    
    static auto op = create_cauchy__typed_handle();
    return op.call(self, median, sigma, generator);
}

// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & cauchy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
    
    static auto op = create_cauchy__typed_handle();
    return op.redispatch(dispatchKeySet, self, median, sigma, generator);
}

// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<log_normal_::schema> create_log_normal__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_normal_::name, log_normal_::overload_name)
      .typed<log_normal_::schema>();
}

// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & log_normal_::call(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    
    static auto op = create_log_normal__typed_handle();
    return op.call(self, mean, std, generator);
}

// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & log_normal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    
    static auto op = create_log_normal__typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator);
}

// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<exponential_::schema> create_exponential__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exponential_::name, exponential_::overload_name)
      .typed<exponential_::schema>();
}

// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & exponential_::call(at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
    
    static auto op = create_exponential__typed_handle();
    return op.call(self, lambd, generator);
}

// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & exponential_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
    
    static auto op = create_exponential__typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, generator);
}

// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<geometric_::schema> create_geometric__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geometric_::name, geometric_::overload_name)
      .typed<geometric_::schema>();
}

// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & geometric_::call(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    
    static auto op = create_geometric__typed_handle();
    return op.call(self, p, generator);
}

// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & geometric_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    
    static auto op = create_geometric__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<diag_out::schema> create_diag_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diag_out::name, diag_out::overload_name)
      .typed<diag_out::schema>();
}

// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & diag_out::call(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_diag_out_typed_handle();
    return op.call(self, diagonal, out);
}

// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & diag_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_diag_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal, out);
}

// aten::diag(Tensor self, int diagonal=0) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<diag::schema> create_diag_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diag::name, diag::overload_name)
      .typed<diag::schema>();
}

// aten::diag(Tensor self, int diagonal=0) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor diag::call(const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_diag_typed_handle();
    return op.call(self, diagonal);
}

// aten::diag(Tensor self, int diagonal=0) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor diag::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_diag_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<cross_out::schema> create_cross_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cross_out::name, cross_out::overload_name)
      .typed<cross_out::schema>();
}

// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor & cross_out::call(const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim, at::Tensor & out) {
    
    static auto op = create_cross_out_typed_handle();
    return op.call(self, other, dim, out);
}

// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor & cross_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim, at::Tensor & out) {
    
    static auto op = create_cross_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dim, out);
}

// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
// One-time resolution of the typed dispatcher handle; throws if the schema is
// unregistered. C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<cross::schema> create_cross_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cross::name, cross::overload_name)
      .typed<cross::schema>();
}

// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor cross::call(const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim) {
    
    static auto op = create_cross_typed_handle();
    return op.call(self, other, dim);
}

// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
// Like call(), but continues dispatch with the caller-provided DispatchKeySet.
at::Tensor cross::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim) {
    
    static auto op = create_cross_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, dim);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<triu_out::schema> create_triu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_out::name, triu_out::overload_name)
      .typed<triu_out::schema>();
}

// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_out::call(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_triu_out_typed_handle();
    return op.call(self, diagonal, out);
}

// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_triu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal, out);
}

// aten::triu(Tensor self, int diagonal=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<triu::schema> create_triu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu::name, triu::overload_name)
      .typed<triu::schema>();
}

// aten::triu(Tensor self, int diagonal=0) -> Tensor
at::Tensor triu::call(const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu_typed_handle();
    return op.call(self, diagonal);
}

// aten::triu(Tensor self, int diagonal=0) -> Tensor
at::Tensor triu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_triu_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<tril_out::schema> create_tril_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tril_out::name, tril_out::overload_name)
      .typed<tril_out::schema>();
}

// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_out::call(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_tril_out_typed_handle();
    return op.call(self, diagonal, out);
}

// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
    
    static auto op = create_tril_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal, out);
}

// aten::tril(Tensor self, int diagonal=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tril::schema> create_tril_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tril::name, tril::overload_name)
      .typed<tril::schema>();
}

// aten::tril(Tensor self, int diagonal=0) -> Tensor
at::Tensor tril::call(const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_tril_typed_handle();
    return op.call(self, diagonal);
}

// aten::tril(Tensor self, int diagonal=0) -> Tensor
at::Tensor tril::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal) {
    
    static auto op = create_tril_typed_handle();
    return op.redispatch(dispatchKeySet, self, diagonal);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// These two are factory ops: TensorOptions arrive as four separate optionals.
// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<tril_indices::schema> create_tril_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tril_indices::name, tril_indices::overload_name)
      .typed<tril_indices::schema>();
}

// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor tril_indices::call(int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_tril_indices_typed_handle();
    return op.call(row, col, offset, dtype, layout, device, pin_memory);
}

// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor tril_indices::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_tril_indices_typed_handle();
    return op.redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory);
}

// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<triu_indices::schema> create_triu_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_indices::name, triu_indices::overload_name)
      .typed<triu_indices::schema>();
}

// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor triu_indices::call(int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_triu_indices_typed_handle();
    return op.call(row, col, offset, dtype, layout, device, pin_memory);
}

// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor triu_indices::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    
    static auto op = create_triu_indices_typed_handle();
    return op.redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// aten::trace(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trace::schema> create_trace_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trace::name, trace::overload_name)
      .typed<trace::schema>();
}

// aten::trace(Tensor self) -> Tensor
at::Tensor trace::call(const at::Tensor & self) {
    
    static auto op = create_trace_typed_handle();
    return op.call(self);
}

// aten::trace(Tensor self) -> Tensor
at::Tensor trace::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_trace_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<trace_backward::schema> create_trace_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trace_backward::name, trace_backward::overload_name)
      .typed<trace_backward::schema>();
}

// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
// NOTE(review): SymInt[] in the schema surfaces as c10::SymIntArrayRef here.
at::Tensor trace_backward::call(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
    
    static auto op = create_trace_backward_typed_handle();
    return op.call(grad, sizes);
}

// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
at::Tensor trace_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef sizes) {
    
    static auto op = create_trace_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, sizes);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne_Scalar_out::schema> create_ne_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Scalar_out::name, ne_Scalar_out::overload_name)
      .typed<ne_Scalar_out::schema>();
}

// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_ne_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_ne_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ne_Scalar::schema> create_ne_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Scalar::name, ne_Scalar::overload_name)
      .typed<ne_Scalar::schema>();
}

// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ne_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ne_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ne_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ne_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne_Tensor_out::schema> create_ne_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Tensor_out::name, ne_Tensor_out::overload_name)
      .typed<ne_Tensor_out::schema>();
}

// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_ne_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ne_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_ne_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ne_Tensor::schema> create_ne_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne_Tensor::name, ne_Tensor::overload_name)
      .typed<ne_Tensor::schema>();
}

// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ne_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ne_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ne_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ne_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne__Scalar::schema> create_ne__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne__Scalar::name, ne__Scalar::overload_name)
      .typed<ne__Scalar::schema>();
}

// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// NOTE(review): in-place variant — self is taken by mutable reference.
at::Tensor & ne__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ne__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & ne__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ne__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ne__Tensor::schema> create_ne__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ne__Tensor::name, ne__Tensor::overload_name)
      .typed<ne__Tensor::schema>();
}

// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ne__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ne__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ne__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ne__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// not_equal.* are separately-registered ops mirroring the ne.* signatures.
// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Scalar_out::schema> create_not_equal_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal_Scalar_out::name, not_equal_Scalar_out::overload_name)
      .typed<not_equal_Scalar_out::schema>();
}

// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & not_equal_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_not_equal_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & not_equal_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_not_equal_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Scalar::schema> create_not_equal_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal_Scalar::name, not_equal_Scalar::overload_name)
      .typed<not_equal_Scalar::schema>();
}

// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor not_equal_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_not_equal_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor not_equal_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_not_equal_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Tensor_out::schema> create_not_equal_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal_Tensor_out::name, not_equal_Tensor_out::overload_name)
      .typed<not_equal_Tensor_out::schema>();
}

// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & not_equal_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_not_equal_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & not_equal_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_not_equal_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<not_equal_Tensor::schema> create_not_equal_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal_Tensor::name, not_equal_Tensor::overload_name)
      .typed<not_equal_Tensor::schema>();
}

// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor not_equal_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_not_equal_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor not_equal_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_not_equal_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<not_equal__Scalar::schema> create_not_equal__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal__Scalar::name, not_equal__Scalar::overload_name)
      .typed<not_equal__Scalar::schema>();
}

// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// NOTE(review): in-place variant — self is taken by mutable reference.
at::Tensor & not_equal__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_not_equal__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & not_equal__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_not_equal__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<not_equal__Tensor::schema> create_not_equal__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(not_equal__Tensor::name, not_equal__Tensor::overload_name)
      .typed<not_equal__Tensor::schema>();
}

// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & not_equal__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_not_equal__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & not_equal__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_not_equal__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq_Scalar_out::schema> create_eq_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Scalar_out::name, eq_Scalar_out::overload_name)
      .typed<eq_Scalar_out::schema>();
}

// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_eq_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_eq_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<eq_Scalar::schema> create_eq_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Scalar::name, eq_Scalar::overload_name)
      .typed<eq_Scalar::schema>();
}

// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor eq_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_eq_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor eq_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_eq_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<eq_Tensor_out::schema> create_eq_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Tensor_out::name, eq_Tensor_out::overload_name)
      .typed<eq_Tensor_out::schema>();
}

// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_eq_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & eq_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_eq_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<eq_Tensor::schema> create_eq_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(eq_Tensor::name, eq_Tensor::overload_name)
      .typed<eq_Tensor::schema>();
}

// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor eq_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_eq_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor eq_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_eq_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge_Scalar_out::schema> create_ge_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Scalar_out::name, ge_Scalar_out::overload_name)
      .typed<ge_Scalar_out::schema>();
}

// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_ge_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_ge_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ge_Scalar::schema> create_ge_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Scalar::name, ge_Scalar::overload_name)
      .typed<ge_Scalar::schema>();
}

// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ge_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ge_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor ge_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ge_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge_Tensor_out::schema> create_ge_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Tensor_out::name, ge_Tensor_out::overload_name)
      .typed<ge_Tensor_out::schema>();
}

// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_ge_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ge_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_ge_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ge_Tensor::schema> create_ge_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge_Tensor::name, ge_Tensor::overload_name)
      .typed<ge_Tensor::schema>();
}

// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ge_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ge_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor ge_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ge_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge__Scalar::schema> create_ge__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge__Scalar::name, ge__Scalar::overload_name)
      .typed<ge__Scalar::schema>();
}

// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// NOTE(review): in-place variant — self is taken by mutable reference.
at::Tensor & ge__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ge__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & ge__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_ge__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ge__Tensor::schema> create_ge__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(ge__Tensor::name, ge__Tensor::overload_name)
      .typed<ge__Tensor::schema>();
}

// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ge__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ge__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & ge__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_ge__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): @generated by torchgen/gen.py — keep code byte-stable; change
// the generator, not this file. create_*_typed_handle() does the one-time
// Dispatcher schema lookup (throws if unregistered; C10_NOINLINE keeps it off
// callers' hot paths); call()/redispatch() cache the handle in a thread-safe
// function-local static, with redispatch() forwarding an explicit DispatchKeySet.
// greater_equal.* are separately-registered ops mirroring the ge.* signatures.
// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Scalar_out::schema> create_greater_equal_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Scalar_out::name, greater_equal_Scalar_out::overload_name)
      .typed<greater_equal_Scalar_out::schema>();
}

// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_greater_equal_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_greater_equal_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Scalar::schema> create_greater_equal_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Scalar::name, greater_equal_Scalar::overload_name)
      .typed<greater_equal_Scalar::schema>();
}

// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_equal_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_greater_equal_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_equal_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_greater_equal_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Tensor_out::schema> create_greater_equal_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Tensor_out::name, greater_equal_Tensor_out::overload_name)
      .typed<greater_equal_Tensor_out::schema>();
}

// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_greater_equal_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_equal_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_greater_equal_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal_Tensor::schema> create_greater_equal_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal_Tensor::name, greater_equal_Tensor::overload_name)
      .typed<greater_equal_Tensor::schema>();
}

// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_equal_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_greater_equal_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_equal_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_greater_equal_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal__Scalar::schema> create_greater_equal__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal__Scalar::name, greater_equal__Scalar::overload_name)
      .typed<greater_equal__Scalar::schema>();
}

// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// NOTE(review): in-place variant — self is taken by mutable reference.
at::Tensor & greater_equal__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_greater_equal__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater_equal__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_greater_equal__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_equal__Tensor::schema> create_greater_equal__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(greater_equal__Tensor::name, greater_equal__Tensor::overload_name)
      .typed<greater_equal__Tensor::schema>();
}

// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater_equal__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_greater_equal__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater_equal__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_greater_equal__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this overload; called once per process
// from the function-local static in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<le_Scalar_out::schema> create_le_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(le_Scalar_out::name, le_Scalar_out::overload_name)
      .typed<le_Scalar_out::schema>();
}

// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_le_Scalar_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_le_Scalar_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<le_Scalar::schema> create_le_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(le_Scalar::name, le_Scalar::overload_name)
      .typed<le_Scalar::schema>();
}

// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor le_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_le_Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor le_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_le_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le_Tensor_out::schema> create_le_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(le_Tensor_out::name, le_Tensor_out::overload_name)
      .typed<le_Tensor_out::schema>();
}

// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_le_Tensor_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & le_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_le_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<le_Tensor::schema> create_le_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(le_Tensor::name, le_Tensor::overload_name)
      .typed<le_Tensor::schema>();
}

// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor le_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_le_Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor le_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_le_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le__Scalar::schema> create_le__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(le__Scalar::name, le__Scalar::overload_name)
      .typed<le__Scalar::schema>();
}

// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & le__Scalar::call(at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_le__Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & le__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_le__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<le__Tensor::schema> create_le__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(le__Tensor::name, le__Tensor::overload_name)
      .typed<le__Tensor::schema>();
}

// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & le__Tensor::call(at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_le__Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & le__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_le__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this overload; called once per process
// from the function-local static in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Scalar_out::schema> create_less_equal_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(less_equal_Scalar_out::name, less_equal_Scalar_out::overload_name)
      .typed<less_equal_Scalar_out::schema>();
}

// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_less_equal_Scalar_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_less_equal_Scalar_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Scalar::schema> create_less_equal_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(less_equal_Scalar::name, less_equal_Scalar::overload_name)
      .typed<less_equal_Scalar::schema>();
}

// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor less_equal_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_less_equal_Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor less_equal_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_less_equal_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Tensor_out::schema> create_less_equal_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(less_equal_Tensor_out::name, less_equal_Tensor_out::overload_name)
      .typed<less_equal_Tensor_out::schema>();
}

// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_less_equal_Tensor_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_equal_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_less_equal_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<less_equal_Tensor::schema> create_less_equal_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(less_equal_Tensor::name, less_equal_Tensor::overload_name)
      .typed<less_equal_Tensor::schema>();
}

// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor less_equal_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_less_equal_Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor less_equal_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_less_equal_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal__Scalar::schema> create_less_equal__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(less_equal__Scalar::name, less_equal__Scalar::overload_name)
      .typed<less_equal__Scalar::schema>();
}

// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & less_equal__Scalar::call(at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_less_equal__Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & less_equal__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_less_equal__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_equal__Tensor::schema> create_less_equal__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(less_equal__Tensor::name, less_equal__Tensor::overload_name)
      .typed<less_equal__Tensor::schema>();
}

// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & less_equal__Tensor::call(at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_less_equal__Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & less_equal__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_less_equal__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this overload; called once per process
// from the function-local static in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<gt_Scalar_out::schema> create_gt_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gt_Scalar_out::name, gt_Scalar_out::overload_name)
      .typed<gt_Scalar_out::schema>();
}

// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_gt_Scalar_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_gt_Scalar_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gt_Scalar::schema> create_gt_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gt_Scalar::name, gt_Scalar::overload_name)
      .typed<gt_Scalar::schema>();
}

// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor gt_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_gt_Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor gt_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_gt_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt_Tensor_out::schema> create_gt_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gt_Tensor_out::name, gt_Tensor_out::overload_name)
      .typed<gt_Tensor_out::schema>();
}

// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_gt_Tensor_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & gt_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_gt_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<gt_Tensor::schema> create_gt_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gt_Tensor::name, gt_Tensor::overload_name)
      .typed<gt_Tensor::schema>();
}

// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor gt_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_gt_Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor gt_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_gt_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt__Scalar::schema> create_gt__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gt__Scalar::name, gt__Scalar::overload_name)
      .typed<gt__Scalar::schema>();
}

// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & gt__Scalar::call(at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_gt__Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & gt__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_gt__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<gt__Tensor::schema> create_gt__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(gt__Tensor::name, gt__Tensor::overload_name)
      .typed<gt__Tensor::schema>();
}

// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & gt__Tensor::call(at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_gt__Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & gt__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_gt__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this overload; called once per process
// from the function-local static in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<greater_Scalar_out::schema> create_greater_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(greater_Scalar_out::name, greater_Scalar_out::overload_name)
      .typed<greater_Scalar_out::schema>();
}

// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_greater_Scalar_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_greater_Scalar_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_Scalar::schema> create_greater_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(greater_Scalar::name, greater_Scalar::overload_name)
      .typed<greater_Scalar::schema>();
}

// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_greater_Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor greater_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_greater_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater_Tensor_out::schema> create_greater_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(greater_Tensor_out::name, greater_Tensor_out::overload_name)
      .typed<greater_Tensor_out::schema>();
}

// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_greater_Tensor_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & greater_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_greater_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<greater_Tensor::schema> create_greater_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(greater_Tensor::name, greater_Tensor::overload_name)
      .typed<greater_Tensor::schema>();
}

// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_greater_Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor greater_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_greater_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater__Scalar::schema> create_greater__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(greater__Scalar::name, greater__Scalar::overload_name)
      .typed<greater__Scalar::schema>();
}

// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater__Scalar::call(at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_greater__Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & greater__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_greater__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<greater__Tensor::schema> create_greater__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(greater__Tensor::name, greater__Tensor::overload_name)
      .typed<greater__Tensor::schema>();
}

// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater__Tensor::call(at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_greater__Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & greater__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_greater__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher entry for this overload; called once per process
// from the function-local static in call()/redispatch() below.
static C10_NOINLINE c10::TypedOperatorHandle<lt_Scalar_out::schema> create_lt_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lt_Scalar_out::name, lt_Scalar_out::overload_name)
      .typed<lt_Scalar_out::schema>();
}

// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_lt_Scalar_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
  static auto handle = create_lt_Scalar_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lt_Scalar::schema> create_lt_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lt_Scalar::name, lt_Scalar::overload_name)
      .typed<lt_Scalar::schema>();
}

// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor lt_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_lt_Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor lt_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_lt_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt_Tensor_out::schema> create_lt_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lt_Tensor_out::name, lt_Tensor_out::overload_name)
      .typed<lt_Tensor_out::schema>();
}

// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_lt_Tensor_out_typed_handle();
  return handle.call(self, other, out);
}

// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lt_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  static auto handle = create_lt_Tensor_out_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lt_Tensor::schema> create_lt_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lt_Tensor::name, lt_Tensor::overload_name)
      .typed<lt_Tensor::schema>();
}

// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor lt_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_lt_Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor lt_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_lt_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt__Scalar::schema> create_lt__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lt__Scalar::name, lt__Scalar::overload_name)
      .typed<lt__Scalar::schema>();
}

// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & lt__Scalar::call(at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_lt__Scalar_typed_handle();
  return handle.call(self, other);
}

// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & lt__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
  static auto handle = create_lt__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lt__Tensor::schema> create_lt__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lt__Tensor::name, lt__Tensor::overload_name)
      .typed<lt__Tensor::schema>();
}

// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lt__Tensor::call(at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_lt__Tensor_typed_handle();
  return handle.call(self, other);
}

// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & lt__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
  static auto handle = create_lt__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<less_Scalar_out::schema> create_less_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less_Scalar_out::name, less_Scalar_out::overload_name)
      .typed<less_Scalar_out::schema>();
}

// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_less_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & less_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_less_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<less_Scalar::schema> create_less_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less_Scalar::name, less_Scalar::overload_name)
      .typed<less_Scalar::schema>();
}

// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor less_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_less_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor less_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_less_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<less_Tensor_out::schema> create_less_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less_Tensor_out::name, less_Tensor_out::overload_name)
      .typed<less_Tensor_out::schema>();
}

// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & less_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_less_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & less_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_less_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<less_Tensor::schema> create_less_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less_Tensor::name, less_Tensor::overload_name)
      .typed<less_Tensor::schema>();
}

// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor less_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_less_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor less_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_less_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<less__Scalar::schema> create_less__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less__Scalar::name, less__Scalar::overload_name)
      .typed<less__Scalar::schema>();
}

// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// In-place entry point: lazily creates the typed handle once (thread-safe
// function-local static) and dispatches the call.
at::Tensor & less__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_less__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & less__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_less__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<less__Tensor::schema> create_less__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(less__Tensor::name, less__Tensor::overload_name)
      .typed<less__Tensor::schema>();
}

// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// In-place entry point: lazily creates the typed handle once (thread-safe
// function-local static) and dispatches the call.
at::Tensor & less__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_less__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & less__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_less__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<take_out::schema> create_take_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take_out::name, take_out::overload_name)
      .typed<take_out::schema>();
}

// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & take_out::call(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
    
    static auto op = create_take_out_typed_handle();
    return op.call(self, index, out);
}

// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & take_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
    
    static auto op = create_take_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, index, out);
}

// aten::take(Tensor self, Tensor index) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<take::schema> create_take_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take::name, take::overload_name)
      .typed<take::schema>();
}

// aten::take(Tensor self, Tensor index) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor take::call(const at::Tensor & self, const at::Tensor & index) {
    
    static auto op = create_take_typed_handle();
    return op.call(self, index);
}

// aten::take(Tensor self, Tensor index) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor take::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index) {
    
    static auto op = create_take_typed_handle();
    return op.redispatch(dispatchKeySet, self, index);
}

// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<take_along_dim_out::schema> create_take_along_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take_along_dim_out::name, take_along_dim_out::overload_name)
      .typed<take_along_dim_out::schema>();
}

// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & take_along_dim_out::call(const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim, at::Tensor & out) {
    
    static auto op = create_take_along_dim_out_typed_handle();
    return op.call(self, indices, dim, out);
}

// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & take_along_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim, at::Tensor & out) {
    
    static auto op = create_take_along_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, dim, out);
}

// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<take_along_dim::schema> create_take_along_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(take_along_dim::name, take_along_dim::overload_name)
      .typed<take_along_dim::schema>();
}

// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor take_along_dim::call(const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim) {
    
    static auto op = create_take_along_dim_typed_handle();
    return op.call(self, indices, dim);
}

// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor take_along_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim) {
    
    static auto op = create_take_along_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, dim);
}

// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_select_out::schema> create_index_select_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_out::name, index_select_out::overload_name)
      .typed<index_select_out::schema>();
}

// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & index_select_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
    
    static auto op = create_index_select_out_typed_handle();
    return op.call(self, dim, index, out);
}

// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & index_select_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
    
    static auto op = create_index_select_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, out);
}

// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_select::schema> create_index_select_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select::name, index_select::overload_name)
      .typed<index_select::schema>();
}

// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor index_select::call(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
    
    static auto op = create_index_select_typed_handle();
    return op.call(self, dim, index);
}

// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor index_select::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index) {
    
    static auto op = create_index_select_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_select_dimname_out::schema> create_index_select_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_dimname_out::name, index_select_dimname_out::overload_name)
      .typed<index_select_dimname_out::schema>();
}

// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & index_select_dimname_out::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
    
    static auto op = create_index_select_dimname_out_typed_handle();
    return op.call(self, dim, index, out);
}

// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & index_select_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
    
    static auto op = create_index_select_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, out);
}

// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_select_dimname::schema> create_index_select_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_dimname::name, index_select_dimname::overload_name)
      .typed<index_select_dimname::schema>();
}

// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor index_select_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
    
    static auto op = create_index_select_dimname_typed_handle();
    return op.call(self, dim, index);
}

// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor index_select_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
    
    static auto op = create_index_select_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index);
}

// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<index_select_backward::schema> create_index_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_select_backward::name, index_select_backward::overload_name)
      .typed<index_select_backward::schema>();
}

// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor index_select_backward::call(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    
    static auto op = create_index_select_backward_typed_handle();
    return op.call(grad, self_sizes, dim, index);
}

// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor index_select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
    
    static auto op = create_index_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self_sizes, dim, index);
}

// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<masked_select_out::schema> create_masked_select_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_select_out::name, masked_select_out::overload_name)
      .typed<masked_select_out::schema>();
}

// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & masked_select_out::call(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    
    static auto op = create_masked_select_out_typed_handle();
    return op.call(self, mask, out);
}

// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & masked_select_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    
    static auto op = create_masked_select_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, out);
}

// aten::masked_select(Tensor self, Tensor mask) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<masked_select::schema> create_masked_select_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_select::name, masked_select::overload_name)
      .typed<masked_select::schema>();
}

// aten::masked_select(Tensor self, Tensor mask) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor masked_select::call(const at::Tensor & self, const at::Tensor & mask) {
    
    static auto op = create_masked_select_typed_handle();
    return op.call(self, mask);
}

// aten::masked_select(Tensor self, Tensor mask) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor masked_select::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) {
    
    static auto op = create_masked_select_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask);
}

// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<masked_select_backward::schema> create_masked_select_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(masked_select_backward::name, masked_select_backward::overload_name)
      .typed<masked_select_backward::schema>();
}

// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor masked_select_backward::call(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
    
    static auto op = create_masked_select_backward_typed_handle();
    return op.call(grad, input, mask);
}

// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor masked_select_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
    
    static auto op = create_masked_select_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, input, mask);
}

// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_out::schema> create_nonzero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_out::name, nonzero_out::overload_name)
      .typed<nonzero_out::schema>();
}

// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & nonzero_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_nonzero_out_typed_handle();
    return op.call(self, out);
}

// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & nonzero_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_nonzero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::nonzero(Tensor self) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero::schema> create_nonzero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero::name, nonzero::overload_name)
      .typed<nonzero::schema>();
}

// aten::nonzero(Tensor self) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor nonzero::call(const at::Tensor & self) {
    
    static auto op = create_nonzero_typed_handle();
    return op.call(self);
}

// aten::nonzero(Tensor self) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor nonzero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_nonzero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::nonzero_static.out(Tensor self, *, SymInt size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_static_out::schema> create_nonzero_static_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_static_out::name, nonzero_static_out::overload_name)
      .typed<nonzero_static_out::schema>();
}

// aten::nonzero_static.out(Tensor self, *, SymInt size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & nonzero_static_out::call(const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
    
    static auto op = create_nonzero_static_out_typed_handle();
    return op.call(self, size, fill_value, out);
}

// aten::nonzero_static.out(Tensor self, *, SymInt size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & nonzero_static_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
    
    static auto op = create_nonzero_static_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value, out);
}

// aten::nonzero_static(Tensor self, *, SymInt size, int fill_value=-1) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_static::schema> create_nonzero_static_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_static::name, nonzero_static::overload_name)
      .typed<nonzero_static::schema>();
}

// aten::nonzero_static(Tensor self, *, SymInt size, int fill_value=-1) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor nonzero_static::call(const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
    
    static auto op = create_nonzero_static_typed_handle();
    return op.call(self, size, fill_value);
}

// aten::nonzero_static(Tensor self, *, SymInt size, int fill_value=-1) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor nonzero_static::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt size, int64_t fill_value) {
    
    static auto op = create_nonzero_static_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value);
}

// aten::nonzero_numpy(Tensor self) -> Tensor[]
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<nonzero_numpy::schema> create_nonzero_numpy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nonzero_numpy::name, nonzero_numpy::overload_name)
      .typed<nonzero_numpy::schema>();
}

// aten::nonzero_numpy(Tensor self) -> Tensor[]
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
::std::vector<at::Tensor> nonzero_numpy::call(const at::Tensor & self) {
    
    static auto op = create_nonzero_numpy_typed_handle();
    return op.call(self);
}

// aten::nonzero_numpy(Tensor self) -> Tensor[]
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
::std::vector<at::Tensor> nonzero_numpy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_nonzero_numpy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::argwhere(Tensor self) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<argwhere::schema> create_argwhere_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argwhere::name, argwhere::overload_name)
      .typed<argwhere::schema>();
}

// aten::argwhere(Tensor self) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor argwhere::call(const at::Tensor & self) {
    
    static auto op = create_argwhere_typed_handle();
    return op.call(self);
}

// aten::argwhere(Tensor self) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor argwhere::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_argwhere_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<gather_out::schema> create_gather_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_out::name, gather_out::overload_name)
      .typed<gather_out::schema>();
}

// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & gather_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
    
    static auto op = create_gather_out_typed_handle();
    return op.call(self, dim, index, sparse_grad, out);
}

// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & gather_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
    
    static auto op = create_gather_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad, out);
}

// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<gather::schema> create_gather_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather::name, gather::overload_name)
      .typed<gather::schema>();
}

// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor gather::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
    
    static auto op = create_gather_typed_handle();
    return op.call(self, dim, index, sparse_grad);
}

// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor gather::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
    
    static auto op = create_gather_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad);
}

// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<gather_backward::schema> create_gather_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_backward::name, gather_backward::overload_name)
      .typed<gather_backward::schema>();
}

// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor gather_backward::call(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
    
    static auto op = create_gather_backward_typed_handle();
    return op.call(grad, self, dim, index, sparse_grad);
}

// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor gather_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
    
    static auto op = create_gather_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, dim, index, sparse_grad);
}

// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<gather_dimname_out::schema> create_gather_dimname_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_dimname_out::name, gather_dimname_out::overload_name)
      .typed<gather_dimname_out::schema>();
}

// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & gather_dimname_out::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
    
    static auto op = create_gather_dimname_out_typed_handle();
    return op.call(self, dim, index, sparse_grad, out);
}

// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & gather_dimname_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
    
    static auto op = create_gather_dimname_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad, out);
}

// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<gather_dimname::schema> create_gather_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(gather_dimname::name, gather_dimname::overload_name)
      .typed<gather_dimname::schema>();
}

// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor gather_dimname::call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
    
    static auto op = create_gather_dimname_typed_handle();
    return op.call(self, dim, index, sparse_grad);
}

// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor gather_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
    
    static auto op = create_gather_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, sparse_grad);
}

// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<_gather_sparse_backward::schema> create__gather_sparse_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_gather_sparse_backward::name, _gather_sparse_backward::overload_name)
      .typed<_gather_sparse_backward::schema>();
}

// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor _gather_sparse_backward::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
    
    static auto op = create__gather_sparse_backward_typed_handle();
    return op.call(self, dim, index, grad);
}

// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor _gather_sparse_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
    
    static auto op = create__gather_sparse_backward_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, index, grad);
}

// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<addcmul_out::schema> create_addcmul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcmul_out::name, addcmul_out::overload_name)
      .typed<addcmul_out::schema>();
}

// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & addcmul_out::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_addcmul_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & addcmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_addcmul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<addcmul::schema> create_addcmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcmul::name, addcmul::overload_name)
      .typed<addcmul::schema>();
}

// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor addcmul::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    
    static auto op = create_addcmul_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor addcmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    
    static auto op = create_addcmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<addcmul_::schema> create_addcmul__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcmul_::name, addcmul_::overload_name)
      .typed<addcmul_::schema>();
}

// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
// In-place entry point: lazily creates the typed handle once (thread-safe
// function-local static) and dispatches the call.
at::Tensor & addcmul_::call(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    
    static auto op = create_addcmul__typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & addcmul_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    
    static auto op = create_addcmul__typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema in the global dispatcher (throws if
// absent) and returns a statically-typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<addcdiv_out::schema> create_addcdiv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(addcdiv_out::name, addcdiv_out::overload_name)
      .typed<addcdiv_out::schema>();
}

// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// Entry point: lazily creates the typed handle once (thread-safe function-local
// static) and dispatches the call.
at::Tensor & addcdiv_out::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_addcdiv_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet instead
// of recomputing it from the arguments.
at::Tensor & addcdiv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_addcdiv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<addcdiv::schema> create_addcdiv_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(addcdiv::name, addcdiv::overload_name).typed<addcdiv::schema>();
}

// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
at::Tensor addcdiv::call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    static auto typed_handle = create_addcdiv_typed_handle();
    return typed_handle.call(self, tensor1, tensor2, value);
}

// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
at::Tensor addcdiv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    static auto typed_handle = create_addcdiv_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<addcdiv_::schema> create_addcdiv__typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(addcdiv_::name, addcdiv_::overload_name).typed<addcdiv_::schema>();
}

// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
at::Tensor & addcdiv_::call(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    static auto typed_handle = create_addcdiv__typed_handle();
    return typed_handle.call(self, tensor1, tensor2, value);
}

// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
at::Tensor & addcdiv_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
    static auto typed_handle = create_addcdiv__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cross_entropy_loss::schema> create_cross_entropy_loss_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cross_entropy_loss::name, cross_entropy_loss::overload_name).typed<cross_entropy_loss::schema>();
}

// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
at::Tensor cross_entropy_loss::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
    static auto typed_handle = create_cross_entropy_loss_typed_handle();
    return typed_handle.call(self, target, weight, reduction, ignore_index, label_smoothing);
}

// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
at::Tensor cross_entropy_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
    static auto typed_handle = create_cross_entropy_loss_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, label_smoothing);
}

// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
static C10_NOINLINE c10::TypedOperatorHandle<triangular_solve_X::schema> create_triangular_solve_X_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(triangular_solve_X::name, triangular_solve_X::overload_name).typed<triangular_solve_X::schema>();
}

// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_X::call(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
    static auto typed_handle = create_triangular_solve_X_typed_handle();
    return typed_handle.call(self, A, upper, transpose, unitriangular, X, M);
}

// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_X::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
    static auto typed_handle = create_triangular_solve_X_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular, X, M);
}

// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
static C10_NOINLINE c10::TypedOperatorHandle<triangular_solve::schema> create_triangular_solve_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(triangular_solve::name, triangular_solve::overload_name).typed<triangular_solve::schema>();
}

// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
::std::tuple<at::Tensor,at::Tensor> triangular_solve::call(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
    static auto typed_handle = create_triangular_solve_typed_handle();
    return typed_handle.call(self, A, upper, transpose, unitriangular);
}

// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
::std::tuple<at::Tensor,at::Tensor> triangular_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
    static auto typed_handle = create_triangular_solve_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular);
}

// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_check_errors::schema> create__linalg_check_errors_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_linalg_check_errors::name, _linalg_check_errors::overload_name).typed<_linalg_check_errors::schema>();
}

// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
void _linalg_check_errors::call(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
    static auto typed_handle = create__linalg_check_errors_typed_handle();
    // `return` of a void expression keeps the generated wrapper uniform across return types.
    return typed_handle.call(info, api_name, is_matrix);
}

// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
void _linalg_check_errors::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
    static auto typed_handle = create__linalg_check_errors_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, info, api_name, is_matrix);
}

// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_triangular_out::schema> create_linalg_solve_triangular_out_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_solve_triangular_out::name, linalg_solve_triangular_out::overload_name).typed<linalg_solve_triangular_out::schema>();
}

// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_triangular_out::call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
    static auto typed_handle = create_linalg_solve_triangular_out_typed_handle();
    return typed_handle.call(self, B, upper, left, unitriangular, out);
}

// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_triangular_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
    static auto typed_handle = create_linalg_solve_triangular_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, B, upper, left, unitriangular, out);
}

// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_triangular::schema> create_linalg_solve_triangular_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_solve_triangular::name, linalg_solve_triangular::overload_name).typed<linalg_solve_triangular::schema>();
}

// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
at::Tensor linalg_solve_triangular::call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
    static auto typed_handle = create_linalg_solve_triangular_typed_handle();
    return typed_handle.call(self, B, upper, left, unitriangular);
}

// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
at::Tensor linalg_solve_triangular::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
    static auto typed_handle = create_linalg_solve_triangular_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, B, upper, left, unitriangular);
}

// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vander::schema> create_linalg_vander_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_vander::name, linalg_vander::overload_name).typed<linalg_vander::schema>();
}

// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor
at::Tensor linalg_vander::call(const at::Tensor & x, ::std::optional<c10::SymInt> N) {
    static auto typed_handle = create_linalg_vander_typed_handle();
    return typed_handle.call(x, N);
}

// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor
at::Tensor linalg_vander::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, ::std::optional<c10::SymInt> N) {
    static auto typed_handle = create_linalg_vander_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, x, N);
}

// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
static C10_NOINLINE c10::TypedOperatorHandle<svd_U::schema> create_svd_U_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(svd_U::name, svd_U::overload_name).typed<svd_U::schema>();
}

// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_U::call(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
    static auto typed_handle = create_svd_U_typed_handle();
    return typed_handle.call(self, some, compute_uv, U, S, V);
}

// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_U::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
    static auto typed_handle = create_svd_U_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, some, compute_uv, U, S, V);
}

// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
static C10_NOINLINE c10::TypedOperatorHandle<svd::schema> create_svd_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(svd::name, svd::overload_name).typed<svd::schema>();
}

// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd::call(const at::Tensor & self, bool some, bool compute_uv) {
    static auto typed_handle = create_svd_typed_handle();
    return typed_handle.call(self, some, compute_uv);
}

// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv) {
    static auto typed_handle = create_svd_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, some, compute_uv);
}

// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<swapaxes::schema> create_swapaxes_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(swapaxes::name, swapaxes::overload_name).typed<swapaxes::schema>();
}

// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
at::Tensor swapaxes::call(const at::Tensor & self, int64_t axis0, int64_t axis1) {
    static auto typed_handle = create_swapaxes_typed_handle();
    return typed_handle.call(self, axis0, axis1);
}

// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
at::Tensor swapaxes::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t axis0, int64_t axis1) {
    static auto typed_handle = create_swapaxes_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, axis0, axis1);
}

// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<swapaxes_::schema> create_swapaxes__typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(swapaxes_::name, swapaxes_::overload_name).typed<swapaxes_::schema>();
}

// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
at::Tensor & swapaxes_::call(at::Tensor & self, int64_t axis0, int64_t axis1) {
    static auto typed_handle = create_swapaxes__typed_handle();
    return typed_handle.call(self, axis0, axis1);
}

// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
at::Tensor & swapaxes_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t axis0, int64_t axis1) {
    static auto typed_handle = create_swapaxes__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, axis0, axis1);
}

// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<swapdims::schema> create_swapdims_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(swapdims::name, swapdims::overload_name).typed<swapdims::schema>();
}

// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
at::Tensor swapdims::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto typed_handle = create_swapdims_typed_handle();
    return typed_handle.call(self, dim0, dim1);
}

// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
at::Tensor swapdims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto typed_handle = create_swapdims_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<swapdims_::schema> create_swapdims__typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(swapdims_::name, swapdims_::overload_name).typed<swapdims_::schema>();
}

// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
at::Tensor & swapdims_::call(at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto typed_handle = create_swapdims__typed_handle();
    return typed_handle.call(self, dim0, dim1);
}

// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
at::Tensor & swapdims_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
    static auto typed_handle = create_swapdims__typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_out::schema> create_cholesky_out_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cholesky_out::name, cholesky_out::overload_name).typed<cholesky_out::schema>();
}

// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_out::call(const at::Tensor & self, bool upper, at::Tensor & out) {
    static auto typed_handle = create_cholesky_out_typed_handle();
    return typed_handle.call(self, upper, out);
}

// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
    static auto typed_handle = create_cholesky_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, upper, out);
}

// aten::cholesky(Tensor self, bool upper=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cholesky::schema> create_cholesky_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cholesky::name, cholesky::overload_name).typed<cholesky::schema>();
}

// aten::cholesky(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky::call(const at::Tensor & self, bool upper) {
    static auto typed_handle = create_cholesky_typed_handle();
    return typed_handle.call(self, upper);
}

// aten::cholesky(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper) {
    static auto typed_handle = create_cholesky_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, upper);
}

// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_solve_out::schema> create_cholesky_solve_out_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cholesky_solve_out::name, cholesky_solve_out::overload_name).typed<cholesky_solve_out::schema>();
}

// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_solve_out::call(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
    static auto typed_handle = create_cholesky_solve_out_typed_handle();
    return typed_handle.call(self, input2, upper, out);
}

// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
    static auto typed_handle = create_cholesky_solve_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, input2, upper, out);
}

// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_solve::schema> create_cholesky_solve_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cholesky_solve::name, cholesky_solve::overload_name).typed<cholesky_solve::schema>();
}

// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
at::Tensor cholesky_solve::call(const at::Tensor & self, const at::Tensor & input2, bool upper) {
    static auto typed_handle = create_cholesky_solve_typed_handle();
    return typed_handle.call(self, input2, upper);
}

// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
at::Tensor cholesky_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper) {
    static auto typed_handle = create_cholesky_solve_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, input2, upper);
}

// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_cholesky_solve_helper::schema> create__cholesky_solve_helper_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_cholesky_solve_helper::name, _cholesky_solve_helper::overload_name).typed<_cholesky_solve_helper::schema>();
}

// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
at::Tensor _cholesky_solve_helper::call(const at::Tensor & self, const at::Tensor & A, bool upper) {
    static auto typed_handle = create__cholesky_solve_helper_typed_handle();
    return typed_handle.call(self, A, upper);
}

// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
at::Tensor _cholesky_solve_helper::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper) {
    static auto typed_handle = create__cholesky_solve_helper_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, A, upper);
}

// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_inverse::schema> create_cholesky_inverse_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cholesky_inverse::name, cholesky_inverse::overload_name).typed<cholesky_inverse::schema>();
}

// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky_inverse::call(const at::Tensor & self, bool upper) {
    static auto typed_handle = create_cholesky_inverse_typed_handle();
    return typed_handle.call(self, upper);
}

// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
at::Tensor cholesky_inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper) {
    static auto typed_handle = create_cholesky_inverse_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, upper);
}

// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cholesky_inverse_out::schema> create_cholesky_inverse_out_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cholesky_inverse_out::name, cholesky_inverse_out::overload_name).typed<cholesky_inverse_out::schema>();
}

// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_inverse_out::call(const at::Tensor & self, bool upper, at::Tensor & out) {
    static auto typed_handle = create_cholesky_inverse_out_typed_handle();
    return typed_handle.call(self, upper, out);
}

// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cholesky_inverse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
    static auto typed_handle = create_cholesky_inverse_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, upper, out);
}

// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
static C10_NOINLINE c10::TypedOperatorHandle<qr_Q::schema> create_qr_Q_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(qr_Q::name, qr_Q::overload_name).typed<qr_Q::schema>();
}

// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> qr_Q::call(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
    static auto typed_handle = create_qr_Q_typed_handle();
    return typed_handle.call(self, some, Q, R);
}

// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> qr_Q::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
    static auto typed_handle = create_qr_Q_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, some, Q, R);
}

// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
static C10_NOINLINE c10::TypedOperatorHandle<qr::schema> create_qr_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(qr::name, qr::overload_name).typed<qr::schema>();
}

// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> qr::call(const at::Tensor & self, bool some) {
    static auto typed_handle = create_qr_typed_handle();
    return typed_handle.call(self, some);
}

// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> qr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some) {
    static auto typed_handle = create_qr_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, some);
}

// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
static C10_NOINLINE c10::TypedOperatorHandle<geqrf_a::schema> create_geqrf_a_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(geqrf_a::name, geqrf_a::overload_name).typed<geqrf_a::schema>();
}

// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
::std::tuple<at::Tensor &,at::Tensor &> geqrf_a::call(const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
    static auto typed_handle = create_geqrf_a_typed_handle();
    return typed_handle.call(self, a, tau);
}

// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
::std::tuple<at::Tensor &,at::Tensor &> geqrf_a::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
    static auto typed_handle = create_geqrf_a_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, a, tau);
}

// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
static C10_NOINLINE c10::TypedOperatorHandle<geqrf::schema> create_geqrf_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(geqrf::name, geqrf::overload_name).typed<geqrf::schema>();
}

// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
::std::tuple<at::Tensor,at::Tensor> geqrf::call(const at::Tensor & self) {
    static auto typed_handle = create_geqrf_typed_handle();
    return typed_handle.call(self);
}

// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
::std::tuple<at::Tensor,at::Tensor> geqrf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto typed_handle = create_geqrf_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::orgqr(Tensor self, Tensor input2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<orgqr::schema> create_orgqr_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(orgqr::name, orgqr::overload_name).typed<orgqr::schema>();
}

// aten::orgqr(Tensor self, Tensor input2) -> Tensor
at::Tensor orgqr::call(const at::Tensor & self, const at::Tensor & input2) {
    static auto typed_handle = create_orgqr_typed_handle();
    return typed_handle.call(self, input2);
}

// aten::orgqr(Tensor self, Tensor input2) -> Tensor
at::Tensor orgqr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2) {
    static auto typed_handle = create_orgqr_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, input2);
}

// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<orgqr_out::schema> create_orgqr_out_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(orgqr_out::name, orgqr_out::overload_name).typed<orgqr_out::schema>();
}

// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & orgqr_out::call(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
    static auto typed_handle = create_orgqr_out_typed_handle();
    return typed_handle.call(self, input2, out);
}

// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & orgqr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
    static auto typed_handle = create_orgqr_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, input2, out);
}

// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ormqr_out::schema> create_ormqr_out_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ormqr_out::name, ormqr_out::overload_name).typed<ormqr_out::schema>();
}

// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ormqr_out::call(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
    static auto typed_handle = create_ormqr_out_typed_handle();
    return typed_handle.call(self, input2, input3, left, transpose, out);
}

// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ormqr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
    static auto typed_handle = create_ormqr_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, input2, input3, left, transpose, out);
}

// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<ormqr::schema> create_ormqr_typed_handle() {
  // One dispatcher schema lookup; callers cache the returned typed handle.
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ormqr::name, ormqr::overload_name).typed<ormqr::schema>();
}

// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
at::Tensor ormqr::call(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
    static auto typed_handle = create_ormqr_typed_handle();
    return typed_handle.call(self, input2, input3, left, transpose);
}

// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
at::Tensor ormqr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
    static auto typed_handle = create_ormqr_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, input2, input3, left, transpose);
}

// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<_lu_with_info::schema> create__lu_with_info_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_lu_with_info::name, _lu_with_info::overload_name)
      .typed<_lu_with_info::schema>();
}

// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info::call(const at::Tensor & self, bool pivot, bool check_errors) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create__lu_with_info_typed_handle();
    return op.call(self, pivot, check_errors);
}

// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool pivot, bool check_errors) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create__lu_with_info_typed_handle();
    return op.redispatch(dispatchKeySet, self, pivot, check_errors);
}

// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lu_solve_out::schema> create_lu_solve_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_solve_out::name, lu_solve_out::overload_name)
      .typed<lu_solve_out::schema>();
}

// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lu_solve_out::call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lu_solve_out_typed_handle();
    return op.call(self, LU_data, LU_pivots, out);
}

// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lu_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lu_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, LU_data, LU_pivots, out);
}

// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lu_solve::schema> create_lu_solve_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_solve::name, lu_solve::overload_name)
      .typed<lu_solve::schema>();
}

// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
at::Tensor lu_solve::call(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lu_solve_typed_handle();
    return op.call(self, LU_data, LU_pivots);
}

// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
at::Tensor lu_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lu_solve_typed_handle();
    return op.redispatch(dispatchKeySet, self, LU_data, LU_pivots);
}

// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
static C10_NOINLINE c10::TypedOperatorHandle<lu_unpack::schema> create_lu_unpack_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_unpack::name, lu_unpack::overload_name)
      .typed<lu_unpack::schema>();
}

// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack::call(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lu_unpack_typed_handle();
    return op.call(LU_data, LU_pivots, unpack_data, unpack_pivots);
}

// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lu_unpack_typed_handle();
    return op.redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots);
}

// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
static C10_NOINLINE c10::TypedOperatorHandle<lu_unpack_out::schema> create_lu_unpack_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lu_unpack_out::name, lu_unpack_out::overload_name)
      .typed<lu_unpack_out::schema>();
}

// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out::call(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lu_unpack_out_typed_handle();
    return op.call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}

// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lu_unpack_out_typed_handle();
    return op.redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
}

// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multinomial_out::schema> create_multinomial_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multinomial_out::name, multinomial_out::overload_name)
      .typed<multinomial_out::schema>();
}

// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multinomial_out::call(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_multinomial_out_typed_handle();
    return op.call(self, num_samples, replacement, generator, out);
}

// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multinomial_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_multinomial_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, num_samples, replacement, generator, out);
}

// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multinomial::schema> create_multinomial_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multinomial::name, multinomial::overload_name)
      .typed<multinomial::schema>();
}

// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
at::Tensor multinomial::call(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_multinomial_typed_handle();
    return op.call(self, num_samples, replacement, generator);
}

// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
at::Tensor multinomial::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_multinomial_typed_handle();
    return op.redispatch(dispatchKeySet, self, num_samples, replacement, generator);
}

// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lgamma_out::schema> create_lgamma_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lgamma_out::name, lgamma_out::overload_name)
      .typed<lgamma_out::schema>();
}

// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lgamma_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lgamma_out_typed_handle();
    return op.call(self, out);
}

// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lgamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lgamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lgamma_::schema> create_lgamma__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lgamma_::name, lgamma_::overload_name)
      .typed<lgamma_::schema>();
}

// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & lgamma_::call(at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lgamma__typed_handle();
    return op.call(self);
}

// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & lgamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lgamma__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::lgamma(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lgamma::schema> create_lgamma_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lgamma::name, lgamma::overload_name)
      .typed<lgamma::schema>();
}

// aten::lgamma(Tensor self) -> Tensor
at::Tensor lgamma::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lgamma_typed_handle();
    return op.call(self);
}

// aten::lgamma(Tensor self) -> Tensor
at::Tensor lgamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lgamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<digamma_out::schema> create_digamma_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(digamma_out::name, digamma_out::overload_name)
      .typed<digamma_out::schema>();
}

// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & digamma_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_digamma_out_typed_handle();
    return op.call(self, out);
}

// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & digamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_digamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::digamma(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<digamma::schema> create_digamma_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(digamma::name, digamma::overload_name)
      .typed<digamma::schema>();
}

// aten::digamma(Tensor self) -> Tensor
at::Tensor digamma::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_digamma_typed_handle();
    return op.call(self);
}

// aten::digamma(Tensor self) -> Tensor
at::Tensor digamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_digamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<polygamma_out::schema> create_polygamma_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polygamma_out::name, polygamma_out::overload_name)
      .typed<polygamma_out::schema>();
}

// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & polygamma_out::call(int64_t n, const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_polygamma_out_typed_handle();
    return op.call(n, self, out);
}

// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & polygamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_polygamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, n, self, out);
}

// aten::polygamma(int n, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<polygamma::schema> create_polygamma_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polygamma::name, polygamma::overload_name)
      .typed<polygamma::schema>();
}

// aten::polygamma(int n, Tensor self) -> Tensor
at::Tensor polygamma::call(int64_t n, const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_polygamma_typed_handle();
    return op.call(n, self);
}

// aten::polygamma(int n, Tensor self) -> Tensor
at::Tensor polygamma::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_polygamma_typed_handle();
    return op.redispatch(dispatchKeySet, n, self);
}

// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<polygamma_::schema> create_polygamma__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(polygamma_::name, polygamma_::overload_name)
      .typed<polygamma_::schema>();
}

// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
at::Tensor & polygamma_::call(at::Tensor & self, int64_t n) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_polygamma__typed_handle();
    return op.call(self, n);
}

// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
at::Tensor & polygamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_polygamma__typed_handle();
    return op.redispatch(dispatchKeySet, self, n);
}

// aten::erfinv(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<erfinv::schema> create_erfinv_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfinv::name, erfinv::overload_name)
      .typed<erfinv::schema>();
}

// aten::erfinv(Tensor self) -> Tensor
at::Tensor erfinv::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_erfinv_typed_handle();
    return op.call(self);
}

// aten::erfinv(Tensor self) -> Tensor
at::Tensor erfinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_erfinv_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erfinv_::schema> create_erfinv__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfinv_::name, erfinv_::overload_name)
      .typed<erfinv_::schema>();
}

// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erfinv_::call(at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_erfinv__typed_handle();
    return op.call(self);
}

// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & erfinv_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_erfinv__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<erfinv_out::schema> create_erfinv_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(erfinv_out::name, erfinv_out::overload_name)
      .typed<erfinv_out::schema>();
}

// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erfinv_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_erfinv_out_typed_handle();
    return op.call(self, out);
}

// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & erfinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_erfinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::i0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<i0::schema> create_i0_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(i0::name, i0::overload_name)
      .typed<i0::schema>();
}

// aten::i0(Tensor self) -> Tensor
at::Tensor i0::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_i0_typed_handle();
    return op.call(self);
}

// aten::i0(Tensor self) -> Tensor
at::Tensor i0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_i0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::i0_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<i0_::schema> create_i0__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(i0_::name, i0_::overload_name)
      .typed<i0_::schema>();
}

// aten::i0_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & i0_::call(at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_i0__typed_handle();
    return op.call(self);
}

// aten::i0_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & i0_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_i0__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<i0_out::schema> create_i0_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(i0_out::name, i0_out::overload_name)
      .typed<i0_out::schema>();
}

// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & i0_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_i0_out_typed_handle();
    return op.call(self, out);
}

// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & i0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_i0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::sign(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<sign::schema> create_sign_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sign::name, sign::overload_name)
      .typed<sign::schema>();
}

// aten::sign(Tensor self) -> Tensor
at::Tensor sign::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_sign_typed_handle();
    return op.call(self);
}

// aten::sign(Tensor self) -> Tensor
at::Tensor sign::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_sign_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sign_(Tensor(a!) self) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sign_::schema> create_sign__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sign_::name, sign_::overload_name)
      .typed<sign_::schema>();
}

// aten::sign_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sign_::call(at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_sign__typed_handle();
    return op.call(self);
}

// aten::sign_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & sign_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_sign__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<sign_out::schema> create_sign_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sign_out::name, sign_out::overload_name)
      .typed<sign_out::schema>();
}

// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sign_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_sign_out_typed_handle();
    return op.call(self, out);
}

// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & sign_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_sign_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::signbit(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<signbit::schema> create_signbit_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(signbit::name, signbit::overload_name)
      .typed<signbit::schema>();
}

// aten::signbit(Tensor self) -> Tensor
at::Tensor signbit::call(const at::Tensor & self) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_signbit_typed_handle();
    return op.call(self);
}

// aten::signbit(Tensor self) -> Tensor
at::Tensor signbit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_signbit_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<signbit_out::schema> create_signbit_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(signbit_out::name, signbit_out::overload_name)
      .typed<signbit_out::schema>();
}

// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & signbit_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_signbit_out_typed_handle();
    return op.call(self, out);
}

// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & signbit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_signbit_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<dist::schema> create_dist_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dist::name, dist::overload_name)
      .typed<dist::schema>();
}

// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
at::Tensor dist::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_dist_typed_handle();
    return op.call(self, other, p);
}

// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
at::Tensor dist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_dist_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, p);
}

// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atan2_out::schema> create_atan2_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atan2_out::name, atan2_out::overload_name)
      .typed<atan2_out::schema>();
}

// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan2_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_atan2_out_typed_handle();
    return op.call(self, other, out);
}

// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & atan2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_atan2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<atan2_::schema> create_atan2__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atan2_::name, atan2_::overload_name)
      .typed<atan2_::schema>();
}

// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & atan2_::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_atan2__typed_handle();
    return op.call(self, other);
}

// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & atan2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_atan2__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::atan2(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<atan2::schema> create_atan2_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(atan2::name, atan2::overload_name)
      .typed<atan2::schema>();
}

// aten::atan2(Tensor self, Tensor other) -> Tensor
at::Tensor atan2::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_atan2_typed_handle();
    return op.call(self, other);
}

// aten::atan2(Tensor self, Tensor other) -> Tensor
at::Tensor atan2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_atan2_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::arctan2(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<arctan2::schema> create_arctan2_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctan2::name, arctan2::overload_name)
      .typed<arctan2::schema>();
}

// aten::arctan2(Tensor self, Tensor other) -> Tensor
at::Tensor arctan2::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_arctan2_typed_handle();
    return op.call(self, other);
}

// aten::arctan2(Tensor self, Tensor other) -> Tensor
at::Tensor arctan2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_arctan2_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan2_out::schema> create_arctan2_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctan2_out::name, arctan2_out::overload_name)
      .typed<arctan2_out::schema>();
}

// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan2_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_arctan2_out_typed_handle();
    return op.call(self, other, out);
}

// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & arctan2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_arctan2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<arctan2_::schema> create_arctan2__typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(arctan2_::name, arctan2_::overload_name)
      .typed<arctan2_::schema>();
}

// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & arctan2_::call(at::Tensor & self, const at::Tensor & other) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_arctan2__typed_handle();
    return op.call(self, other);
}

// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & arctan2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_arctan2__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Scalar_out::schema> create_lerp_Scalar_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp_Scalar_out::name, lerp_Scalar_out::overload_name)
      .typed<lerp_Scalar_out::schema>();
}

// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Scalar_out::call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lerp_Scalar_out_typed_handle();
    return op.call(self, end, weight, out);
}

// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lerp_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight, out);
}

// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Tensor_out::schema> create_lerp_Tensor_out_typed_handle() {
  // One-time dispatcher lookup; C10_NOINLINE keeps it out of the hot path.
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(lerp_Tensor_out::name, lerp_Tensor_out::overload_name)
      .typed<lerp_Tensor_out::schema>();
}

// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Tensor_out::call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_lerp_Tensor_out_typed_handle();
    return op.call(self, end, weight, out);
}

// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & lerp_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
    // Cached handle; continues dispatch with the caller-supplied key set.
    static auto op = create_lerp_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, end, weight, out);
}

// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Scalar::schema> create_lerp_Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lerp_Scalar::name, lerp_Scalar::overload_name)
      .typed<lerp_Scalar::schema>();
}

// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
at::Tensor lerp_Scalar::call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_lerp_Scalar_typed_handle();
    return handle.call(self, end, weight);
}

// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
at::Tensor lerp_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_lerp_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, end, weight);
}

// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<lerp_Tensor::schema> create_lerp_Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(lerp_Tensor::name, lerp_Tensor::overload_name)
      .typed<lerp_Tensor::schema>();
}

// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
at::Tensor lerp_Tensor::call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_lerp_Tensor_typed_handle();
    return handle.call(self, end, weight);
}

// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
at::Tensor lerp_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_lerp_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, end, weight);
}

// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<histc_out::schema> create_histc_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histc_out::name, histc_out::overload_name)
      .typed<histc_out::schema>();
}

// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & histc_out::call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histc_out_typed_handle();
    return handle.call(self, bins, min, max, out);
}

// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & histc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histc_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, min, max, out);
}

// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<histc::schema> create_histc_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histc::name, histc::overload_name)
      .typed<histc::schema>();
}

// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
at::Tensor histc::call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histc_typed_handle();
    return handle.call(self, bins, min, max);
}

// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
at::Tensor histc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histc_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, min, max);
}

// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bins_tensor_out::schema> create_histogram_bins_tensor_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogram_bins_tensor_out::name, histogram_bins_tensor_out::overload_name)
      .typed<histogram_bins_tensor_out::schema>();
}

// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bins_tensor_out::call(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogram_bins_tensor_out_typed_handle();
    return handle.call(self, bins, weight, density, hist, bin_edges);
}

// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bins_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogram_bins_tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, weight, density, hist, bin_edges);
}

// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bins_tensor::schema> create_histogram_bins_tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogram_bins_tensor::name, histogram_bins_tensor::overload_name)
      .typed<histogram_bins_tensor::schema>();
}

// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor::call(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogram_bins_tensor_typed_handle();
    return handle.call(self, bins, weight, density);
}

// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogram_bins_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, weight, density);
}

// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bin_ct_out::schema> create_histogram_bin_ct_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogram_bin_ct_out::name, histogram_bin_ct_out::overload_name)
      .typed<histogram_bin_ct_out::schema>();
}

// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bin_ct_out::call(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogram_bin_ct_out_typed_handle();
    return handle.call(self, bins, range, weight, density, hist, bin_edges);
}

// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
::std::tuple<at::Tensor &,at::Tensor &> histogram_bin_ct_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogram_bin_ct_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density, hist, bin_edges);
}

// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogram_bin_ct::schema> create_histogram_bin_ct_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogram_bin_ct::name, histogram_bin_ct::overload_name)
      .typed<histogram_bin_ct::schema>();
}

// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct::call(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogram_bin_ct_typed_handle();
    return handle.call(self, bins, range, weight, density);
}

// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogram_bin_ct_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_bin_edges::schema> create__histogramdd_bin_edges_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_histogramdd_bin_edges::name, _histogramdd_bin_edges::overload_name)
      .typed<_histogramdd_bin_edges::schema>();
}

// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
::std::vector<at::Tensor> _histogramdd_bin_edges::call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__histogramdd_bin_edges_typed_handle();
    return handle.call(self, bins, range, weight, density);
}

// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
::std::vector<at::Tensor> _histogramdd_bin_edges::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create__histogramdd_bin_edges_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_cts::schema> create__histogramdd_from_bin_cts_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_histogramdd_from_bin_cts::name, _histogramdd_from_bin_cts::overload_name)
      .typed<_histogramdd_from_bin_cts::schema>();
}

// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_cts::call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__histogramdd_from_bin_cts_typed_handle();
    return handle.call(self, bins, range, weight, density);
}

// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_cts::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create__histogramdd_from_bin_cts_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_tensors::schema> create__histogramdd_from_bin_tensors_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_histogramdd_from_bin_tensors::name, _histogramdd_from_bin_tensors::overload_name)
      .typed<_histogramdd_from_bin_tensors::schema>();
}

// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_tensors::call(const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create__histogramdd_from_bin_tensors_typed_handle();
    return handle.call(self, bins, weight, density);
}

// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
at::Tensor _histogramdd_from_bin_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create__histogramdd_from_bin_tensors_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, weight, density);
}

// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogramdd::schema> create_histogramdd_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogramdd::name, histogramdd::overload_name)
      .typed<histogramdd::schema>();
}

// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd::call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogramdd_typed_handle();
    return handle.call(self, bins, range, weight, density);
}

// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogramdd_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogramdd_int_bins::schema> create_histogramdd_int_bins_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogramdd_int_bins::name, histogramdd_int_bins::overload_name)
      .typed<histogramdd_int_bins::schema>();
}

// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins::call(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogramdd_int_bins_typed_handle();
    return handle.call(self, bins, range, weight, density);
}

// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogramdd_int_bins_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
static C10_NOINLINE c10::TypedOperatorHandle<histogramdd_TensorList_bins::schema> create_histogramdd_TensorList_bins_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(histogramdd_TensorList_bins::name, histogramdd_TensorList_bins::overload_name)
      .typed<histogramdd_TensorList_bins::schema>();
}

// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins::call(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_histogramdd_TensorList_bins_typed_handle();
    return handle.call(self, bins, range, weight, density);
}

// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_histogramdd_TensorList_bins_typed_handle();
    return handle.redispatch(dispatchKeySet, self, bins, range, weight, density);
}

// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Scalar_out::schema> create_fmod_Scalar_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fmod_Scalar_out::name, fmod_Scalar_out::overload_name)
      .typed<fmod_Scalar_out::schema>();
}

// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_fmod_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_fmod_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Scalar::schema> create_fmod_Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fmod_Scalar::name, fmod_Scalar::overload_name)
      .typed<fmod_Scalar::schema>();
}

// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor fmod_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_fmod_Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor fmod_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_fmod_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod__Scalar::schema> create_fmod__Scalar_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fmod__Scalar::name, fmod__Scalar::overload_name)
      .typed<fmod__Scalar::schema>();
}

// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & fmod__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_fmod__Scalar_typed_handle();
    return handle.call(self, other);
}

// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & fmod__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_fmod__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Tensor_out::schema> create_fmod_Tensor_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fmod_Tensor_out::name, fmod_Tensor_out::overload_name)
      .typed<fmod_Tensor_out::schema>();
}

// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_fmod_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmod_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_fmod_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmod_Tensor::schema> create_fmod_Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fmod_Tensor::name, fmod_Tensor::overload_name)
      .typed<fmod_Tensor::schema>();
}

// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor fmod_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_fmod_Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor fmod_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_fmod_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmod__Tensor::schema> create_fmod__Tensor_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(fmod__Tensor::name, fmod__Tensor::overload_name)
      .typed<fmod__Tensor::schema>();
}

// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & fmod__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_fmod__Tensor_typed_handle();
    return handle.call(self, other);
}

// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & fmod__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_fmod__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hypot_out::schema> create_hypot_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(hypot_out::name, hypot_out::overload_name)
      .typed<hypot_out::schema>();
}

// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hypot_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_hypot_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hypot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_hypot_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::hypot(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<hypot::schema> create_hypot_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(hypot::name, hypot::overload_name)
      .typed<hypot::schema>();
}

// aten::hypot(Tensor self, Tensor other) -> Tensor
at::Tensor hypot::call(const at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_hypot_typed_handle();
    return handle.call(self, other);
}

// aten::hypot(Tensor self, Tensor other) -> Tensor
at::Tensor hypot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_hypot_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<hypot_::schema> create_hypot__typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(hypot_::name, hypot_::overload_name)
      .typed<hypot_::schema>();
}

// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & hypot_::call(at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_hypot__typed_handle();
    return handle.call(self, other);
}

// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & hypot_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_hypot__typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<igamma_out::schema> create_igamma_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(igamma_out::name, igamma_out::overload_name)
      .typed<igamma_out::schema>();
}

// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igamma_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_igamma_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_igamma_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::igamma(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<igamma::schema> create_igamma_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(igamma::name, igamma::overload_name)
      .typed<igamma::schema>();
}

// aten::igamma(Tensor self, Tensor other) -> Tensor
at::Tensor igamma::call(const at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_igamma_typed_handle();
    return handle.call(self, other);
}

// aten::igamma(Tensor self, Tensor other) -> Tensor
at::Tensor igamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_igamma_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<igamma_::schema> create_igamma__typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(igamma_::name, igamma_::overload_name)
      .typed<igamma_::schema>();
}

// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igamma_::call(at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_igamma__typed_handle();
    return handle.call(self, other);
}

// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_igamma__typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<igammac_out::schema> create_igammac_out_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(igammac_out::name, igammac_out::overload_name)
      .typed<igammac_out::schema>();
}

// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igammac_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_igammac_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & igammac_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_igammac_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::igammac(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<igammac::schema> create_igammac_typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(igammac::name, igammac::overload_name)
      .typed<igammac::schema>();
}

// aten::igammac(Tensor self, Tensor other) -> Tensor
at::Tensor igammac::call(const at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_igammac_typed_handle();
    return handle.call(self, other);
}

// aten::igammac(Tensor self, Tensor other) -> Tensor
at::Tensor igammac::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_igammac_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<igammac_::schema> create_igammac__typed_handle() {
  // Look up the schema by (name, overload) and bind it to the op's typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(igammac_::name, igammac_::overload_name)
      .typed<igammac_::schema>();
}

// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igammac_::call(at::Tensor & self, const at::Tensor & other) {
    // The typed operator handle is resolved once and cached for all later calls.
    static auto handle = create_igammac__typed_handle();
    return handle.call(self, other);
}

// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & igammac_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto handle = create_igammac__typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// NOTE(@generated): dispatcher stubs for the nextafter overloads (out,
// functional, in-place). Each C10_NOINLINE helper resolves the op's
// TypedOperatorHandle from the Dispatcher singleton (findSchemaOrThrow throws
// if the schema was never registered); call()/redispatch() cache the handle in
// a thread-safe function-local static so the lookup happens once per process.
// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nextafter_out::schema> create_nextafter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nextafter_out::name, nextafter_out::overload_name)
      .typed<nextafter_out::schema>();
}

// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nextafter_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_nextafter_out_typed_handle();
    return op.call(self, other, out);
}

// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & nextafter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_nextafter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::nextafter(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nextafter::schema> create_nextafter_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nextafter::name, nextafter::overload_name)
      .typed<nextafter::schema>();
}

// aten::nextafter(Tensor self, Tensor other) -> Tensor
at::Tensor nextafter::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_nextafter_typed_handle();
    return op.call(self, other);
}

// aten::nextafter(Tensor self, Tensor other) -> Tensor
at::Tensor nextafter::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_nextafter_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nextafter_::schema> create_nextafter__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nextafter_::name, nextafter_::overload_name)
      .typed<nextafter_::schema>();
}

// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & nextafter_::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_nextafter__typed_handle();
    return op.call(self, other);
}

// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & nextafter_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_nextafter__typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(@generated): dispatcher stubs for every remainder overload
// (Scalar_out, Scalar, in-place Scalar, Tensor_out, Tensor, in-place Tensor,
// Scalar_Tensor). Each C10_NOINLINE helper resolves the overload's
// TypedOperatorHandle from the Dispatcher singleton (findSchemaOrThrow throws
// if the schema was never registered); call()/redispatch() cache the handle in
// a thread-safe function-local static so the lookup happens once per process.
// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar_out::schema> create_remainder_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar_out::name, remainder_Scalar_out::overload_name)
      .typed<remainder_Scalar_out::schema>();
}

// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_remainder_Scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_remainder_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar::schema> create_remainder_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar::name, remainder_Scalar::overload_name)
      .typed<remainder_Scalar::schema>();
}

// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor remainder_Scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder_Scalar_typed_handle();
    return op.call(self, other);
}

// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
at::Tensor remainder_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder__Scalar::schema> create_remainder__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder__Scalar::name, remainder__Scalar::overload_name)
      .typed<remainder__Scalar::schema>();
}

// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & remainder__Scalar::call(at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder__Scalar_typed_handle();
    return op.call(self, other);
}

// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
at::Tensor & remainder__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_remainder__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Tensor_out::schema> create_remainder_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Tensor_out::name, remainder_Tensor_out::overload_name)
      .typed<remainder_Tensor_out::schema>();
}

// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_remainder_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_remainder_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Tensor::schema> create_remainder_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Tensor::name, remainder_Tensor::overload_name)
      .typed<remainder_Tensor::schema>();
}

// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor remainder_Tensor::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
at::Tensor remainder_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<remainder__Tensor::schema> create_remainder__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder__Tensor::name, remainder__Tensor::overload_name)
      .typed<remainder__Tensor::schema>();
}

// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & remainder__Tensor::call(at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder__Tensor_typed_handle();
    return op.call(self, other);
}

// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
at::Tensor & remainder__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_remainder__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar_Tensor::schema> create_remainder_Scalar_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar_Tensor::name, remainder_Scalar_Tensor::overload_name)
      .typed<remainder_Scalar_Tensor::schema>();
}

// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor remainder_Scalar_Tensor::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Scalar_Tensor_typed_handle();
    return op.call(self, other);
}

// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
at::Tensor remainder_Scalar_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_remainder_Scalar_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(@generated): dispatcher stubs for min (full reduction) and
// min.unary_out. The C10_NOINLINE helper resolves the op's TypedOperatorHandle
// from the Dispatcher singleton (findSchemaOrThrow throws if the schema was
// never registered); call()/redispatch() cache it in a thread-safe
// function-local static so the lookup happens once per process.
// aten::min(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<min::schema> create_min_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min::name, min::overload_name)
      .typed<min::schema>();
}

// aten::min(Tensor self) -> Tensor
at::Tensor min::call(const at::Tensor & self) {
    
    static auto op = create_min_typed_handle();
    return op.call(self);
}

// aten::min(Tensor self) -> Tensor
at::Tensor min::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_min_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<min_unary_out::schema> create_min_unary_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_unary_out::name, min_unary_out::overload_name)
      .typed<min_unary_out::schema>();
}

// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & min_unary_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_min_unary_out_typed_handle();
    return op.call(self, out);
}

// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & min_unary_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_min_unary_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// NOTE(@generated): dispatcher stubs for fmin and fmin.out. The C10_NOINLINE
// helper resolves the op's TypedOperatorHandle from the Dispatcher singleton
// (findSchemaOrThrow throws if the schema was never registered);
// call()/redispatch() cache it in a thread-safe function-local static.
// aten::fmin(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmin::schema> create_fmin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmin::name, fmin::overload_name)
      .typed<fmin::schema>();
}

// aten::fmin(Tensor self, Tensor other) -> Tensor
at::Tensor fmin::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmin_typed_handle();
    return op.call(self, other);
}

// aten::fmin(Tensor self, Tensor other) -> Tensor
at::Tensor fmin::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmin_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmin_out::schema> create_fmin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmin_out::name, fmin_out::overload_name)
      .typed<fmin_out::schema>();
}

// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmin_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_fmin_out_typed_handle();
    return op.call(self, other, out);
}

// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmin_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_fmin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// NOTE(@generated): dispatcher stubs for max (full reduction), fmax, and
// fmax.out. Each C10_NOINLINE helper resolves the op's TypedOperatorHandle
// from the Dispatcher singleton (findSchemaOrThrow throws if the schema was
// never registered); call()/redispatch() cache it in a thread-safe
// function-local static so the lookup happens once per process.
// aten::max(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max::schema> create_max_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max::name, max::overload_name)
      .typed<max::schema>();
}

// aten::max(Tensor self) -> Tensor
at::Tensor max::call(const at::Tensor & self) {
    
    static auto op = create_max_typed_handle();
    return op.call(self);
}

// aten::max(Tensor self) -> Tensor
at::Tensor max::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_max_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::fmax(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fmax::schema> create_fmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmax::name, fmax::overload_name)
      .typed<fmax::schema>();
}

// aten::fmax(Tensor self, Tensor other) -> Tensor
at::Tensor fmax::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmax_typed_handle();
    return op.call(self, other);
}

// aten::fmax(Tensor self, Tensor other) -> Tensor
at::Tensor fmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_fmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fmax_out::schema> create_fmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fmax_out::name, fmax_out::overload_name)
      .typed<fmax_out::schema>();
}

// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmax_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_fmax_out_typed_handle();
    return op.call(self, other, out);
}

// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_fmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// NOTE(@generated): dispatcher stubs for maximum and maximum.out. The
// C10_NOINLINE helper resolves the op's TypedOperatorHandle from the
// Dispatcher singleton (findSchemaOrThrow throws if the schema was never
// registered); call()/redispatch() cache it in a thread-safe function-local
// static.
// aten::maximum(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<maximum::schema> create_maximum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(maximum::name, maximum::overload_name)
      .typed<maximum::schema>();
}

// aten::maximum(Tensor self, Tensor other) -> Tensor
at::Tensor maximum::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_maximum_typed_handle();
    return op.call(self, other);
}

// aten::maximum(Tensor self, Tensor other) -> Tensor
at::Tensor maximum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_maximum_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<maximum_out::schema> create_maximum_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(maximum_out::name, maximum_out::overload_name)
      .typed<maximum_out::schema>();
}

// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & maximum_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_maximum_out_typed_handle();
    return op.call(self, other, out);
}

// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & maximum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_maximum_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// NOTE(@generated): dispatcher stubs for the max.other, max.out, and
// max.unary_out overloads. Each C10_NOINLINE helper resolves the overload's
// TypedOperatorHandle from the Dispatcher singleton (findSchemaOrThrow throws
// if the schema was never registered); call()/redispatch() cache it in a
// thread-safe function-local static so the lookup happens once per process.
// aten::max.other(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<max_other::schema> create_max_other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_other::name, max_other::overload_name)
      .typed<max_other::schema>();
}

// aten::max.other(Tensor self, Tensor other) -> Tensor
at::Tensor max_other::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_max_other_typed_handle();
    return op.call(self, other);
}

// aten::max.other(Tensor self, Tensor other) -> Tensor
at::Tensor max_other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_max_other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_out::schema> create_max_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_out::name, max_out::overload_name)
      .typed<max_out::schema>();
}

// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_max_out_typed_handle();
    return op.call(self, other, out);
}

// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_max_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<max_unary_out::schema> create_max_unary_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unary_out::name, max_unary_out::overload_name)
      .typed<max_unary_out::schema>();
}

// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unary_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_max_unary_out_typed_handle();
    return op.call(self, out);
}

// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_unary_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_max_unary_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// NOTE(@generated): dispatcher stubs for minimum, minimum.out, min.out, and
// min.other. Each C10_NOINLINE helper resolves the overload's
// TypedOperatorHandle from the Dispatcher singleton (findSchemaOrThrow throws
// if the schema was never registered); call()/redispatch() cache it in a
// thread-safe function-local static so the lookup happens once per process.
// aten::minimum(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<minimum::schema> create_minimum_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(minimum::name, minimum::overload_name)
      .typed<minimum::schema>();
}

// aten::minimum(Tensor self, Tensor other) -> Tensor
at::Tensor minimum::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_minimum_typed_handle();
    return op.call(self, other);
}

// aten::minimum(Tensor self, Tensor other) -> Tensor
at::Tensor minimum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_minimum_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<minimum_out::schema> create_minimum_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(minimum_out::name, minimum_out::overload_name)
      .typed<minimum_out::schema>();
}

// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & minimum_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_minimum_out_typed_handle();
    return op.call(self, other, out);
}

// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & minimum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_minimum_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<min_out::schema> create_min_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_out::name, min_out::overload_name)
      .typed<min_out::schema>();
}

// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & min_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_min_out_typed_handle();
    return op.call(self, other, out);
}

// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & min_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_min_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::min.other(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<min_other::schema> create_min_other_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(min_other::name, min_other::overload_name)
      .typed<min_other::schema>();
}

// aten::min.other(Tensor self, Tensor other) -> Tensor
at::Tensor min_other::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_min_other_typed_handle();
    return op.call(self, other);
}

// aten::min.other(Tensor self, Tensor other) -> Tensor
at::Tensor min_other::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_min_other_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(@generated): dispatcher stubs for the quantile overloads (tensor-q,
// tensor-q out, scalar-q, scalar-q out). Each C10_NOINLINE helper resolves the
// overload's TypedOperatorHandle from the Dispatcher singleton
// (findSchemaOrThrow throws if the schema was never registered);
// call()/redispatch() cache it in a thread-safe function-local static.
// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantile::schema> create_quantile_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile::name, quantile::overload_name)
      .typed<quantile::schema>();
}

// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile::call(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_quantile_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_quantile_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantile_out::schema> create_quantile_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile_out::name, quantile_out::overload_name)
      .typed<quantile_out::schema>();
}

// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_out::call(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_quantile_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_quantile_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<quantile_scalar::schema> create_quantile_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile_scalar::name, quantile_scalar::overload_name)
      .typed<quantile_scalar::schema>();
}

// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile_scalar::call(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_quantile_scalar_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor quantile_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_quantile_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<quantile_scalar_out::schema> create_quantile_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantile_scalar_out::name, quantile_scalar_out::overload_name)
      .typed<quantile_scalar_out::schema>();
}

// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_scalar_out::call(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_quantile_scalar_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantile_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_quantile_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile::schema> create_nanquantile_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile::name, nanquantile::overload_name)
      .typed<nanquantile::schema>();
}

// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor nanquantile::call(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
at::Tensor nanquantile::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile_out::schema> create_nanquantile_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile_out::name, nanquantile_out::overload_name)
      .typed<nanquantile_out::schema>();
}

// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanquantile_out::call(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
at::Tensor & nanquantile_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile_scalar::schema> create_nanquantile_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile_scalar::name, nanquantile_scalar::overload_name)
      .typed<nanquantile_scalar::schema>();
}

// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor nanquantile_scalar::call(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_scalar_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation);
}

// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor nanquantile_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
    
    static auto op = create_nanquantile_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation);
}

// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<nanquantile_scalar_out::schema> create_nanquantile_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanquantile_scalar_out::name, nanquantile_scalar_out::overload_name)
      .typed<nanquantile_scalar_out::schema>();
}

// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & nanquantile_scalar_out::call(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_scalar_out_typed_handle();
    return op.call(self, q, dim, keepdim, interpolation, out);
}

// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & nanquantile_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
    
    static auto op = create_nanquantile_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out);
}

// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_values::schema> create_sort_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_values::name, sort_values::overload_name)
      .typed<sort_values::schema>();
}

// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor &,at::Tensor &> sort_values::call(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_values_typed_handle();
    return op.call(self, dim, descending, values, indices);
}

// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> sort_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending, values, indices);
}

// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_values_stable::schema> create_sort_values_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_values_stable::name, sort_values_stable::overload_name)
      .typed<sort_values_stable::schema>();
}

// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor &,at::Tensor &> sort_values_stable::call(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_values_stable_typed_handle();
    return op.call(self, stable, dim, descending, values, indices);
}

// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> sort_values_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_values_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending, values, indices);
}

// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort::schema> create_sort_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort::name, sort::overload_name)
      .typed<sort::schema>();
}

// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor,at::Tensor> sort::call(const at::Tensor & self, int64_t dim, bool descending) {
    
    static auto op = create_sort_typed_handle();
    return op.call(self, dim, descending);
}

// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> sort::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending) {
    
    static auto op = create_sort_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_stable::schema> create_sort_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_stable::name, sort_stable::overload_name)
      .typed<sort_stable::schema>();
}

// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor,at::Tensor> sort_stable::call(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
    
    static auto op = create_sort_stable_typed_handle();
    return op.call(self, stable, dim, descending);
}

// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> sort_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
    
    static auto op = create_sort_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending);
}

// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname_values::schema> create_sort_dimname_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname_values::name, sort_dimname_values::overload_name)
      .typed<sort_dimname_values::schema>();
}

// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values::call(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_dimname_values_typed_handle();
    return op.call(self, dim, descending, values, indices);
}

// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_dimname_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending, values, indices);
}

// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname_values_stable::schema> create_sort_dimname_values_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname_values_stable::name, sort_dimname_values_stable::overload_name)
      .typed<sort_dimname_values_stable::schema>();
}

// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values_stable::call(const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_dimname_values_stable_typed_handle();
    return op.call(self, stable, dim, descending, values, indices);
}

// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> sort_dimname_values_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_sort_dimname_values_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending, values, indices);
}

// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname::schema> create_sort_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname::name, sort_dimname::overload_name)
      .typed<sort_dimname::schema>();
}

// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor,at::Tensor> sort_dimname::call(const at::Tensor & self, at::Dimname dim, bool descending) {
    
    static auto op = create_sort_dimname_typed_handle();
    return op.call(self, dim, descending);
}

// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> sort_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending) {
    
    static auto op = create_sort_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<sort_dimname_stable::schema> create_sort_dimname_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sort_dimname_stable::name, sort_dimname_stable::overload_name)
      .typed<sort_dimname_stable::schema>();
}

// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable::call(const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending) {
    
    static auto op = create_sort_dimname_stable_typed_handle();
    return op.call(self, stable, dim, descending);
}

// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending) {
    
    static auto op = create_sort_dimname_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending);
}

// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<msort_out::schema> create_msort_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(msort_out::name, msort_out::overload_name)
      .typed<msort_out::schema>();
}

// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & msort_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_msort_out_typed_handle();
    return op.call(self, out);
}

// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & msort_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_msort_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::msort(Tensor self) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<msort::schema> create_msort_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(msort::name, msort::overload_name)
      .typed<msort::schema>();
}

// aten::msort(Tensor self) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor msort::call(const at::Tensor & self) {
    
    static auto op = create_msort_typed_handle();
    return op.call(self);
}

// aten::msort(Tensor self) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor msort::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_msort_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<argsort::schema> create_argsort_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort::name, argsort::overload_name)
      .typed<argsort::schema>();
}

// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor argsort::call(const at::Tensor & self, int64_t dim, bool descending) {
    
    static auto op = create_argsort_typed_handle();
    return op.call(self, dim, descending);
}

// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor argsort::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending) {
    
    static auto op = create_argsort_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<argsort_stable::schema> create_argsort_stable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort_stable::name, argsort_stable::overload_name)
      .typed<argsort_stable::schema>();
}

// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor argsort_stable::call(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
    
    static auto op = create_argsort_stable_typed_handle();
    return op.call(self, stable, dim, descending);
}

// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor argsort_stable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending) {
    
    static auto op = create_argsort_stable_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending);
}

// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<argsort_stable_out::schema> create_argsort_stable_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort_stable_out::name, argsort_stable_out::overload_name)
      .typed<argsort_stable_out::schema>();
}

// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & argsort_stable_out::call(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
    
    static auto op = create_argsort_stable_out_typed_handle();
    return op.call(self, stable, dim, descending, out);
}

// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & argsort_stable_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
    
    static auto op = create_argsort_stable_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, stable, dim, descending, out);
}

// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<argsort_dimname::schema> create_argsort_dimname_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(argsort_dimname::name, argsort_dimname::overload_name)
      .typed<argsort_dimname::schema>();
}

// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor argsort_dimname::call(const at::Tensor & self, at::Dimname dim, bool descending) {
    
    static auto op = create_argsort_dimname_typed_handle();
    return op.call(self, dim, descending);
}

// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor argsort_dimname::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending) {
    
    static auto op = create_argsort_dimname_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, descending);
}

// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<topk_values::schema> create_topk_values_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(topk_values::name, topk_values::overload_name)
      .typed<topk_values::schema>();
}

// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor &,at::Tensor &> topk_values::call(const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_topk_values_typed_handle();
    return op.call(self, k, dim, largest, sorted, values, indices);
}

// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> topk_values::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
    
    static auto op = create_topk_values_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices);
}

// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<topk::schema> create_topk_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(topk::name, topk::overload_name)
      .typed<topk::schema>();
}

// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
// Public dispatch entry point; the handle is resolved lazily on first use.
::std::tuple<at::Tensor,at::Tensor> topk::call(const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted) {
    
    static auto op = create_topk_typed_handle();
    return op.call(self, k, dim, largest, sorted);
}

// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> topk::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted) {
    
    static auto op = create_topk_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dim, largest, sorted);
}

// aten::all(Tensor self) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<all::schema> create_all_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all::name, all::overload_name)
      .typed<all::schema>();
}

// aten::all(Tensor self) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor all::call(const at::Tensor & self) {
    
    static auto op = create_all_typed_handle();
    return op.call(self);
}

// aten::all(Tensor self) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor all::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_all_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<all_all_out::schema> create_all_all_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(all_all_out::name, all_all_out::overload_name)
      .typed<all_all_out::schema>();
}

// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & all_all_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_all_all_out_typed_handle();
    return op.call(self, out);
}

// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & all_all_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_all_all_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::any(Tensor self) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<any::schema> create_any_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any::name, any::overload_name)
      .typed<any::schema>();
}

// aten::any(Tensor self) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor any::call(const at::Tensor & self) {
    
    static auto op = create_any_typed_handle();
    return op.call(self);
}

// aten::any(Tensor self) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor any::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_any_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<any_all_out::schema> create_any_all_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(any_all_out::name, any_all_out::overload_name)
      .typed<any_all_out::schema>();
}

// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & any_all_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_any_all_out_typed_handle();
    return op.call(self, out);
}

// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & any_all_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_any_all_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<renorm_out::schema> create_renorm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(renorm_out::name, renorm_out::overload_name)
      .typed<renorm_out::schema>();
}

// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & renorm_out::call(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
    
    static auto op = create_renorm_out_typed_handle();
    return op.call(self, p, dim, maxnorm, out);
}

// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & renorm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
    
    static auto op = create_renorm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dim, maxnorm, out);
}

// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<renorm::schema> create_renorm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(renorm::name, renorm::overload_name)
      .typed<renorm::schema>();
}

// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor renorm::call(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm_typed_handle();
    return op.call(self, p, dim, maxnorm);
}

// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor renorm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dim, maxnorm);
}

// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this in-place overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<renorm_::schema> create_renorm__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(renorm_::name, renorm_::overload_name)
      .typed<renorm_::schema>();
}

// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & renorm_::call(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm__typed_handle();
    return op.call(self, p, dim, maxnorm);
}

// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & renorm_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
    
    static auto op = create_renorm__typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dim, maxnorm);
}

// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<unfold::schema> create_unfold_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold::name, unfold::overload_name)
      .typed<unfold::schema>();
}

// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor unfold::call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    
    static auto op = create_unfold_typed_handle();
    return op.call(self, dimension, size, step);
}

// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor unfold::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    
    static auto op = create_unfold_typed_handle();
    return op.redispatch(dispatchKeySet, self, dimension, size, step);
}

// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<unfold_backward::schema> create_unfold_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold_backward::name, unfold_backward::overload_name)
      .typed<unfold_backward::schema>();
}

// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor unfold_backward::call(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
    
    static auto op = create_unfold_backward_typed_handle();
    return op.call(grad_in, input_sizes, dim, size, step);
}

// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor unfold_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
    
    static auto op = create_unfold_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step);
}

// aten::equal(Tensor self, Tensor other) -> bool
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<equal::schema> create_equal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(equal::name, equal::overload_name)
      .typed<equal::schema>();
}

// aten::equal(Tensor self, Tensor other) -> bool
// Public dispatch entry point; the handle is resolved lazily on first use.
bool equal::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_equal_typed_handle();
    return op.call(self, other);
}

// aten::equal(Tensor self, Tensor other) -> bool
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
bool equal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_equal_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Tensor_out::schema> create_pow_Tensor_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Tensor_out::name, pow_Tensor_Tensor_out::overload_name)
      .typed<pow_Tensor_Tensor_out::schema>();
}

// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & pow_Tensor_Tensor_out::call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_pow_Tensor_Tensor_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & pow_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_pow_Tensor_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Tensor::schema> create_pow_Tensor_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Tensor::name, pow_Tensor_Tensor::overload_name)
      .typed<pow_Tensor_Tensor::schema>();
}

// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor pow_Tensor_Tensor::call(const at::Tensor & self, const at::Tensor & exponent) {
    
    static auto op = create_pow_Tensor_Tensor_typed_handle();
    return op.call(self, exponent);
}

// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor pow_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) {
    
    static auto op = create_pow_Tensor_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
// One-time lookup of the typed dispatcher handle for this overload (cold path).
static C10_NOINLINE c10::TypedOperatorHandle<pow_Scalar_out::schema> create_pow_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Scalar_out::name, pow_Scalar_out::overload_name)
      .typed<pow_Scalar_out::schema>();
}

// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
// Public dispatch entry point; the handle is resolved lazily on first use.
at::Tensor & pow_Scalar_out::call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_pow_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches with the caller-supplied DispatchKeySet.
at::Tensor & pow_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    
    static auto op = create_pow_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pow_Scalar::schema> create_pow_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Scalar::name, pow_Scalar::overload_name)
      .typed<pow_Scalar::schema>();
}

// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor pow_Scalar::call(const at::Scalar & self, const at::Tensor & exponent) {
    
    static auto op = create_pow_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor pow_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) {
    
    static auto op = create_pow_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Scalar_out::schema> create_pow_Tensor_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Scalar_out::name, pow_Tensor_Scalar_out::overload_name)
      .typed<pow_Tensor_Scalar_out::schema>();
}

// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    
    static auto op = create_pow_Tensor_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pow_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    
    static auto op = create_pow_Tensor_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<pow_Tensor_Scalar::schema> create_pow_Tensor_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow_Tensor_Scalar::name, pow_Tensor_Scalar::overload_name)
      .typed<pow_Tensor_Scalar::schema>();
}

// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor pow_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & exponent) {
    
    static auto op = create_pow_Tensor_Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor pow_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) {
    
    static auto op = create_pow_Tensor_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow__Scalar::schema> create_pow__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow__Scalar::name, pow__Scalar::overload_name)
      .typed<pow__Scalar::schema>();
}

// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
at::Tensor & pow__Scalar::call(at::Tensor & self, const at::Scalar & exponent) {
    
    static auto op = create_pow__Scalar_typed_handle();
    return op.call(self, exponent);
}

// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
at::Tensor & pow__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) {
    
    static auto op = create_pow__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pow__Tensor::schema> create_pow__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pow__Tensor::name, pow__Tensor::overload_name)
      .typed<pow__Tensor::schema>();
}

// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
at::Tensor & pow__Tensor::call(at::Tensor & self, const at::Tensor & exponent) {
    
    static auto op = create_pow__Tensor_typed_handle();
    return op.call(self, exponent);
}

// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
at::Tensor & pow__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) {
    
    static auto op = create_pow__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent);
}

// ---- aten::float_power / aten::float_power_ dispatcher stubs ----------------
// Same cached-handle pattern as the other generated stubs: resolve the typed
// operator handle once per overload, then forward to call / redispatch.

// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Tensor_out::schema> create_float_power_Tensor_Tensor_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power_Tensor_Tensor_out::name, float_power_Tensor_Tensor_out::overload_name).typed<float_power_Tensor_Tensor_out::schema>();
}

// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Tensor_out::call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    static const auto handle = create_float_power_Tensor_Tensor_out_typed_handle();
    return handle.call(self, exponent, out);
}

// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
    static const auto handle = create_float_power_Tensor_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Tensor::schema> create_float_power_Tensor_Tensor_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power_Tensor_Tensor::name, float_power_Tensor_Tensor::overload_name).typed<float_power_Tensor_Tensor::schema>();
}

// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
at::Tensor float_power_Tensor_Tensor::call(const at::Tensor & self, const at::Tensor & exponent) {
    static const auto handle = create_float_power_Tensor_Tensor_typed_handle();
    return handle.call(self, exponent);
}

// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
at::Tensor float_power_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) {
    static const auto handle = create_float_power_Tensor_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Scalar_out::schema> create_float_power_Scalar_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power_Scalar_out::name, float_power_Scalar_out::overload_name).typed<float_power_Scalar_out::schema>();
}

// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Scalar_out::call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    static const auto handle = create_float_power_Scalar_out_typed_handle();
    return handle.call(self, exponent, out);
}

// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
    static const auto handle = create_float_power_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Scalar::schema> create_float_power_Scalar_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power_Scalar::name, float_power_Scalar::overload_name).typed<float_power_Scalar::schema>();
}

// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor float_power_Scalar::call(const at::Scalar & self, const at::Tensor & exponent) {
    static const auto handle = create_float_power_Scalar_typed_handle();
    return handle.call(self, exponent);
}

// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
at::Tensor float_power_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) {
    static const auto handle = create_float_power_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Scalar_out::schema> create_float_power_Tensor_Scalar_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power_Tensor_Scalar_out::name, float_power_Tensor_Scalar_out::overload_name).typed<float_power_Tensor_Scalar_out::schema>();
}

// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Scalar_out::call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    static const auto handle = create_float_power_Tensor_Scalar_out_typed_handle();
    return handle.call(self, exponent, out);
}

// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & float_power_Tensor_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
    static const auto handle = create_float_power_Tensor_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<float_power_Tensor_Scalar::schema> create_float_power_Tensor_Scalar_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power_Tensor_Scalar::name, float_power_Tensor_Scalar::overload_name).typed<float_power_Tensor_Scalar::schema>();
}

// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor float_power_Tensor_Scalar::call(const at::Tensor & self, const at::Scalar & exponent) {
    static const auto handle = create_float_power_Tensor_Scalar_typed_handle();
    return handle.call(self, exponent);
}

// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
at::Tensor float_power_Tensor_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) {
    static const auto handle = create_float_power_Tensor_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power__Scalar::schema> create_float_power__Scalar_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power__Scalar::name, float_power__Scalar::overload_name).typed<float_power__Scalar::schema>();
}

// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
at::Tensor & float_power__Scalar::call(at::Tensor & self, const at::Scalar & exponent) {
    static const auto handle = create_float_power__Scalar_typed_handle();
    return handle.call(self, exponent);
}

// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
at::Tensor & float_power__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) {
    static const auto handle = create_float_power__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<float_power__Tensor::schema> create_float_power__Tensor_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(float_power__Tensor::name, float_power__Tensor::overload_name).typed<float_power__Tensor::schema>();
}

// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
at::Tensor & float_power__Tensor::call(at::Tensor & self, const at::Tensor & exponent) {
    static const auto handle = create_float_power__Tensor_typed_handle();
    return handle.call(self, exponent);
}

// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
at::Tensor & float_power__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) {
    static const auto handle = create_float_power__Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// ---- aten::normal / aten::normal_ dispatcher stubs --------------------------
// Cached-handle pattern: each overload resolves its TypedOperatorHandle once
// and forwards arguments unchanged to call / redispatch.

// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_::schema> create_normal__typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_::name, normal_::overload_name).typed<normal_::schema>();
}

// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & normal_::call(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal__typed_handle();
    return handle.call(self, mean, std, generator);
}

// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
at::Tensor & normal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal__typed_handle();
    return handle.redispatch(dispatchKeySet, self, mean, std, generator);
}

// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_functional::schema> create_normal_functional_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_functional::name, normal_functional::overload_name).typed<normal_functional::schema>();
}

// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_functional::call(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_functional_typed_handle();
    return handle.call(self, mean, std, generator);
}

// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_functional_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mean, std, generator);
}

// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_float_out::schema> create_normal_Tensor_float_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_Tensor_float_out::name, normal_Tensor_float_out::overload_name).typed<normal_Tensor_float_out::schema>();
}

// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_Tensor_float_out::call(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_Tensor_float_out_typed_handle();
    return handle.call(mean, std, generator, out);
}

// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_Tensor_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_Tensor_float_out_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, generator, out);
}

// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_float::schema> create_normal_Tensor_float_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_Tensor_float::name, normal_Tensor_float::overload_name).typed<normal_Tensor_float::schema>();
}

// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_Tensor_float::call(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_Tensor_float_typed_handle();
    return handle.call(mean, std, generator);
}

// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
at::Tensor normal_Tensor_float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_Tensor_float_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, generator);
}

// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_float_Tensor_out::schema> create_normal_float_Tensor_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_float_Tensor_out::name, normal_float_Tensor_out::overload_name).typed<normal_float_Tensor_out::schema>();
}

// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_float_Tensor_out::call(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_float_Tensor_out_typed_handle();
    return handle.call(mean, std, generator, out);
}

// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_float_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_float_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, generator, out);
}

// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_float_Tensor::schema> create_normal_float_Tensor_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_float_Tensor::name, normal_float_Tensor::overload_name).typed<normal_float_Tensor::schema>();
}

// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
at::Tensor normal_float_Tensor::call(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_float_Tensor_typed_handle();
    return handle.call(mean, std, generator);
}

// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
at::Tensor normal_float_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_float_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, generator);
}

// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_Tensor_out::schema> create_normal_Tensor_Tensor_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_Tensor_Tensor_out::name, normal_Tensor_Tensor_out::overload_name).typed<normal_Tensor_Tensor_out::schema>();
}

// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_Tensor_Tensor_out::call(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_Tensor_Tensor_out_typed_handle();
    return handle.call(mean, std, generator, out);
}

// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_Tensor_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_Tensor_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, generator, out);
}

// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_Tensor_Tensor::schema> create_normal_Tensor_Tensor_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_Tensor_Tensor::name, normal_Tensor_Tensor::overload_name).typed<normal_Tensor_Tensor::schema>();
}

// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
at::Tensor normal_Tensor_Tensor::call(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_Tensor_Tensor_typed_handle();
    return handle.call(mean, std, generator);
}

// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
at::Tensor normal_Tensor_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
    static const auto handle = create_normal_Tensor_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, generator);
}

// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<normal_float_float::schema> create_normal_float_float_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_float_float::name, normal_float_float::overload_name).typed<normal_float_float::schema>();
}

// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor normal_float_float::call(double mean, double std, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static const auto handle = create_normal_float_float_typed_handle();
    return handle.call(mean, std, size, generator, dtype, layout, device, pin_memory);
}

// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor normal_float_float::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    static const auto handle = create_normal_float_float_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, size, generator, dtype, layout, device, pin_memory);
}

// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<normal_float_float_out::schema> create_normal_float_float_out_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(normal_float_float_out::name, normal_float_float_out::overload_name).typed<normal_float_float_out::schema>();
}

// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_float_float_out::call(double mean, double std, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_float_float_out_typed_handle();
    return handle.call(mean, std, size, generator, out);
}

// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_float_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
    static const auto handle = create_normal_float_float_out_typed_handle();
    return handle.redispatch(dispatchKeySet, mean, std, size, generator, out);
}

// ---- aten::alias dispatcher stubs -------------------------------------------

// aten::alias(Tensor(a) self) -> Tensor(a)
static C10_NOINLINE c10::TypedOperatorHandle<alias::schema> create_alias_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(alias::name, alias::overload_name).typed<alias::schema>();
}

// aten::alias(Tensor(a) self) -> Tensor(a)
at::Tensor alias::call(const at::Tensor & self) {
    // Handle is resolved once and reused for every subsequent call.
    static const auto handle = create_alias_typed_handle();
    return handle.call(self);
}

// aten::alias(Tensor(a) self) -> Tensor(a)
at::Tensor alias::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static const auto handle = create_alias_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// ---- aten::_amp_foreach_non_finite_check_and_unscale_ dispatcher stubs ------

// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_amp_foreach_non_finite_check_and_unscale_::schema> create__amp_foreach_non_finite_check_and_unscale__typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_amp_foreach_non_finite_check_and_unscale_::name, _amp_foreach_non_finite_check_and_unscale_::overload_name).typed<_amp_foreach_non_finite_check_and_unscale_::schema>();
}

// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
void _amp_foreach_non_finite_check_and_unscale_::call(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
    static const auto handle = create__amp_foreach_non_finite_check_and_unscale__typed_handle();
    return handle.call(self, found_inf, inv_scale);
}

// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
void _amp_foreach_non_finite_check_and_unscale_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
    static const auto handle = create__amp_foreach_non_finite_check_and_unscale__typed_handle();
    return handle.redispatch(dispatchKeySet, self, found_inf, inv_scale);
}

// ---- aten::_amp_update_scale_ dispatcher stubs ------------------------------

// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_amp_update_scale_::schema> create__amp_update_scale__typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_amp_update_scale_::name, _amp_update_scale_::overload_name).typed<_amp_update_scale_::schema>();
}

// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
at::Tensor & _amp_update_scale_::call(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    static const auto handle = create__amp_update_scale__typed_handle();
    return handle.call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
at::Tensor & _amp_update_scale_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    static const auto handle = create__amp_update_scale__typed_handle();
    return handle.redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// ---- aten::_foreach_add.Scalar / aten::_foreach_add_.Scalar stubs -----------

// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_Scalar::schema> create__foreach_add_Scalar_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_Scalar::name, _foreach_add_Scalar::overload_name).typed<_foreach_add_Scalar::schema>();
}

// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static const auto handle = create__foreach_add_Scalar_typed_handle();
    return handle.call(self, scalar);
}

// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static const auto handle = create__foreach_add_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__Scalar::schema> create__foreach_add__Scalar_typed_handle() {
  auto & dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add__Scalar::name, _foreach_add__Scalar::overload_name).typed<_foreach_add__Scalar::schema>();
}

// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_add__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    static const auto handle = create__foreach_add__Scalar_typed_handle();
    return handle.call(self, scalar);
}

// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_add__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    static const auto handle = create__foreach_add__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_List::schema> create__foreach_add_List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_List::name, _foreach_add_List::overload_name)
                   .typed<_foreach_add_List::schema>();
}

// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_add_List_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_add_List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__List::schema> create__foreach_add__List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add__List::name, _foreach_add__List::overload_name)
                   .typed<_foreach_add__List::schema>();
}

// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_add__List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_add__List_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_add__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_add__List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_ScalarList::schema> create__foreach_add_ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_ScalarList::name, _foreach_add_ScalarList::overload_name)
                   .typed<_foreach_add_ScalarList::schema>();
}

// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_add_ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_add_ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__ScalarList::schema> create__foreach_add__ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add__ScalarList::name, _foreach_add__ScalarList::overload_name)
                   .typed<_foreach_add__ScalarList::schema>();
}

// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_add__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_add__ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_add__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_add__ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_Tensor::schema> create__foreach_add_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_Tensor::name, _foreach_add_Tensor::overload_name)
                   .typed<_foreach_add_Tensor::schema>();
}

// aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_Tensor::call(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_add_Tensor_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_add_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_add_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add__Tensor::schema> create__foreach_add__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add__Tensor::name, _foreach_add__Tensor::overload_name)
                   .typed<_foreach_add__Tensor::schema>();
}

// aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()
void _foreach_add__Tensor::call(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_add__Tensor_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()
void _foreach_add__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_add__Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_Scalar::schema> create__foreach_sub_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub_Scalar::name, _foreach_sub_Scalar::overload_name)
                   .typed<_foreach_sub_Scalar::schema>();
}

// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_sub_Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_sub_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub__Scalar::schema> create__foreach_sub__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub__Scalar::name, _foreach_sub__Scalar::overload_name)
                   .typed<_foreach_sub__Scalar::schema>();
}

// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_sub__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_sub__Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_sub__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_sub__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_List::schema> create__foreach_sub_List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub_List::name, _foreach_sub_List::overload_name)
                   .typed<_foreach_sub_List::schema>();
}

// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_sub_List_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_sub_List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub__List::schema> create__foreach_sub__List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub__List::name, _foreach_sub__List::overload_name)
                   .typed<_foreach_sub__List::schema>();
}

// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_sub__List::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_sub__List_typed_handle();
    return typed_handle.call(self, other, alpha);
}

// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
void _foreach_sub__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_sub__List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_ScalarList::schema> create__foreach_sub_ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub_ScalarList::name, _foreach_sub_ScalarList::overload_name)
                   .typed<_foreach_sub_ScalarList::schema>();
}

// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_sub_ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_sub_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_sub_ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub__ScalarList::schema> create__foreach_sub__ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub__ScalarList::name, _foreach_sub__ScalarList::overload_name)
                   .typed<_foreach_sub__ScalarList::schema>();
}

// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_sub__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_sub__ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_sub__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_sub__ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_Scalar::schema> create__foreach_mul_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_Scalar::name, _foreach_mul_Scalar::overload_name)
                   .typed<_foreach_mul_Scalar::schema>();
}

// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul_Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__Scalar::schema> create__foreach_mul__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul__Scalar::name, _foreach_mul__Scalar::overload_name)
                   .typed<_foreach_mul__Scalar::schema>();
}

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_mul__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul__Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_mul__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_List::schema> create__foreach_mul_List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_List::name, _foreach_mul_List::overload_name)
                   .typed<_foreach_mul_List::schema>();
}

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_List::call(at::TensorList self, at::TensorList other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul_List_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul_List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__List::schema> create__foreach_mul__List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul__List::name, _foreach_mul__List::overload_name)
                   .typed<_foreach_mul__List::schema>();
}

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_mul__List::call(at::TensorList self, at::TensorList other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul__List_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_mul__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul__List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_ScalarList::schema> create__foreach_mul_ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_ScalarList::name, _foreach_mul_ScalarList::overload_name)
                   .typed<_foreach_mul_ScalarList::schema>();
}

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul_ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul_ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__ScalarList::schema> create__foreach_mul__ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul__ScalarList::name, _foreach_mul__ScalarList::overload_name)
                   .typed<_foreach_mul__ScalarList::schema>();
}

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_mul__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul__ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_mul__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul__ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_Tensor::schema> create__foreach_mul_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_Tensor::name, _foreach_mul_Tensor::overload_name)
                   .typed<_foreach_mul_Tensor::schema>();
}

// aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_Tensor::call(at::TensorList self, const at::Tensor & other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul_Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_mul.Tensor(Tensor[] self, Tensor other) -> Tensor[]
::std::vector<at::Tensor> _foreach_mul_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul__Tensor::schema> create__foreach_mul__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul__Tensor::name, _foreach_mul__Tensor::overload_name)
                   .typed<_foreach_mul__Tensor::schema>();
}

// aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
void _foreach_mul__Tensor::call(at::TensorList self, const at::Tensor & other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_mul__Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_mul_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
void _foreach_mul__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_mul__Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_Scalar::schema> create__foreach_div_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_Scalar::name, _foreach_div_Scalar::overload_name)
                   .typed<_foreach_div_Scalar::schema>();
}

// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div_Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__Scalar::schema> create__foreach_div__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div__Scalar::name, _foreach_div__Scalar::overload_name)
                   .typed<_foreach_div__Scalar::schema>();
}

// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_div__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div__Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_div__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_List::schema> create__foreach_div_List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_List::name, _foreach_div_List::overload_name)
                   .typed<_foreach_div_List::schema>();
}

// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_List::call(at::TensorList self, at::TensorList other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div_List_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div_List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__List::schema> create__foreach_div__List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div__List::name, _foreach_div__List::overload_name)
                   .typed<_foreach_div__List::schema>();
}

// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_div__List::call(at::TensorList self, at::TensorList other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div__List_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_div__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div__List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_ScalarList::schema> create__foreach_div_ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_ScalarList::name, _foreach_div_ScalarList::overload_name)
                   .typed<_foreach_div_ScalarList::schema>();
}

// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div_ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div_ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__ScalarList::schema> create__foreach_div__ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div__ScalarList::name, _foreach_div__ScalarList::overload_name)
                   .typed<_foreach_div__ScalarList::schema>();
}

// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_div__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div__ScalarList_typed_handle();
    return typed_handle.call(self, scalars);
}

// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_div__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div__ScalarList_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_Tensor::schema> create__foreach_div_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_Tensor::name, _foreach_div_Tensor::overload_name)
                   .typed<_foreach_div_Tensor::schema>();
}

// aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_Tensor::call(at::TensorList self, const at::Tensor & other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div_Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]
::std::vector<at::Tensor> _foreach_div_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div__Tensor::schema> create__foreach_div__Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div__Tensor::name, _foreach_div__Tensor::overload_name)
                   .typed<_foreach_div__Tensor::schema>();
}

// aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
void _foreach_div__Tensor::call(at::TensorList self, const at::Tensor & other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_div__Tensor_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()
void _foreach_div__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_div__Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_Scalar::schema> create__foreach_clamp_max_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max_Scalar::name, _foreach_clamp_max_Scalar::overload_name)
                   .typed<_foreach_clamp_max_Scalar::schema>();
}

// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_clamp_max_Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_clamp_max_Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max__Scalar::schema> create__foreach_clamp_max__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max__Scalar::name, _foreach_clamp_max__Scalar::overload_name)
                   .typed<_foreach_clamp_max__Scalar::schema>();
}

// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_max__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_clamp_max__Scalar_typed_handle();
    return typed_handle.call(self, scalar);
}

// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_max__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_clamp_max__Scalar_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_List::schema> create__foreach_clamp_max_List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max_List::name, _foreach_clamp_max_List::overload_name)
                   .typed<_foreach_clamp_max_List::schema>();
}

// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_List::call(at::TensorList self, at::TensorList other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_clamp_max_List_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_clamp_max_List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
// Looks up this operator's schema in the dispatcher and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max__List::schema> create__foreach_clamp_max__List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max__List::name, _foreach_clamp_max__List::overload_name)
                   .typed<_foreach_clamp_max__List::schema>();
}

// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_max__List::call(at::TensorList self, at::TensorList other) {
    // Handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__foreach_clamp_max__List_typed_handle();
    return typed_handle.call(self, other);
}

// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_max__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
    // Same cached handle, but forwards an explicit DispatchKeySet.
    static auto typed_handle = create__foreach_clamp_max__List_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_ScalarList::schema> create__foreach_clamp_max_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max_ScalarList::name, _foreach_clamp_max_ScalarList::overload_name)
      .typed<_foreach_clamp_max_ScalarList::schema>();
}

// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_clamp_max_ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_max_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_clamp_max_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max__ScalarList::schema> create__foreach_clamp_max__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_clamp_max__ScalarList::name, _foreach_clamp_max__ScalarList::overload_name)
      .typed<_foreach_clamp_max__ScalarList::schema>();
}

// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_clamp_max__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_clamp_max__ScalarList_typed_handle();
    return op.call(self, scalars);
}

// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_clamp_max__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_clamp_max__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars);
}

// ---------------------------------------------------------------------------
// Dispatcher stubs for all aten::_foreach_clamp_min overloads
// (Scalar / List / ScalarList, out-of-place and in-place).  Pattern: a
// NOINLINE factory resolves a typed handle via the c10::Dispatcher; `call`
// dispatches normally; `redispatch` dispatches with an explicit
// DispatchKeySet.  Handles are cached in function-local statics.
// ---------------------------------------------------------------------------

// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_Scalar::schema> create__foreach_clamp_min_Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_clamp_min_Scalar::name, _foreach_clamp_min_Scalar::overload_name).typed<_foreach_clamp_min_Scalar::schema>();
}

// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_clamp_min_Scalar_typed_handle();
  return handle.call(self, scalar);
}

// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_clamp_min_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min__Scalar::schema> create__foreach_clamp_min__Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_clamp_min__Scalar::name, _foreach_clamp_min__Scalar::overload_name).typed<_foreach_clamp_min__Scalar::schema>();
}

// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_min__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_clamp_min__Scalar_typed_handle();
  return handle.call(self, scalar);
}

// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_clamp_min__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_clamp_min__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_List::schema> create__foreach_clamp_min_List_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_clamp_min_List::name, _foreach_clamp_min_List::overload_name).typed<_foreach_clamp_min_List::schema>();
}

// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_List::call(at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_clamp_min_List_typed_handle();
  return handle.call(self, other);
}

// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_clamp_min_List_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min__List::schema> create__foreach_clamp_min__List_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_clamp_min__List::name, _foreach_clamp_min__List::overload_name).typed<_foreach_clamp_min__List::schema>();
}

// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_min__List::call(at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_clamp_min__List_typed_handle();
  return handle.call(self, other);
}

// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_clamp_min__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_clamp_min__List_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_ScalarList::schema> create__foreach_clamp_min_ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_clamp_min_ScalarList::name, _foreach_clamp_min_ScalarList::overload_name).typed<_foreach_clamp_min_ScalarList::schema>();
}

// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_clamp_min_ScalarList_typed_handle();
  return handle.call(self, scalars);
}

// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_clamp_min_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_clamp_min_ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min__ScalarList::schema> create__foreach_clamp_min__ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_clamp_min__ScalarList::name, _foreach_clamp_min__ScalarList::overload_name).typed<_foreach_clamp_min__ScalarList::schema>();
}

// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_clamp_min__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_clamp_min__ScalarList_typed_handle();
  return handle.call(self, scalars);
}

// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_clamp_min__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_clamp_min__ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalars);
}

// ---------------------------------------------------------------------------
// Dispatcher stubs for all aten::_foreach_maximum overloads
// (Scalar / List / ScalarList, out-of-place and in-place).  Pattern: a
// NOINLINE factory resolves a typed handle via the c10::Dispatcher; `call`
// dispatches normally; `redispatch` dispatches with an explicit
// DispatchKeySet.  Handles are cached in function-local statics.
// ---------------------------------------------------------------------------

// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_Scalar::schema> create__foreach_maximum_Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_maximum_Scalar::name, _foreach_maximum_Scalar::overload_name).typed<_foreach_maximum_Scalar::schema>();
}

// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_maximum_Scalar_typed_handle();
  return handle.call(self, scalar);
}

// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_maximum_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum__Scalar::schema> create__foreach_maximum__Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_maximum__Scalar::name, _foreach_maximum__Scalar::overload_name).typed<_foreach_maximum__Scalar::schema>();
}

// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_maximum__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_maximum__Scalar_typed_handle();
  return handle.call(self, scalar);
}

// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_maximum__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_maximum__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_List::schema> create__foreach_maximum_List_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_maximum_List::name, _foreach_maximum_List::overload_name).typed<_foreach_maximum_List::schema>();
}

// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_List::call(at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_maximum_List_typed_handle();
  return handle.call(self, other);
}

// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_maximum_List_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum__List::schema> create__foreach_maximum__List_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_maximum__List::name, _foreach_maximum__List::overload_name).typed<_foreach_maximum__List::schema>();
}

// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_maximum__List::call(at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_maximum__List_typed_handle();
  return handle.call(self, other);
}

// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_maximum__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_maximum__List_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_ScalarList::schema> create__foreach_maximum_ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_maximum_ScalarList::name, _foreach_maximum_ScalarList::overload_name).typed<_foreach_maximum_ScalarList::schema>();
}

// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_maximum_ScalarList_typed_handle();
  return handle.call(self, scalars);
}

// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_maximum_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_maximum_ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum__ScalarList::schema> create__foreach_maximum__ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_maximum__ScalarList::name, _foreach_maximum__ScalarList::overload_name).typed<_foreach_maximum__ScalarList::schema>();
}

// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_maximum__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_maximum__ScalarList_typed_handle();
  return handle.call(self, scalars);
}

// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_maximum__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_maximum__ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalars);
}

// ---------------------------------------------------------------------------
// Dispatcher stubs for all aten::_foreach_minimum overloads
// (Scalar / List / ScalarList, out-of-place and in-place).  Pattern: a
// NOINLINE factory resolves a typed handle via the c10::Dispatcher; `call`
// dispatches normally; `redispatch` dispatches with an explicit
// DispatchKeySet.  Handles are cached in function-local statics.
// ---------------------------------------------------------------------------

// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_Scalar::schema> create__foreach_minimum_Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_minimum_Scalar::name, _foreach_minimum_Scalar::overload_name).typed<_foreach_minimum_Scalar::schema>();
}

// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_Scalar::call(at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_minimum_Scalar_typed_handle();
  return handle.call(self, scalar);
}

// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_minimum_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum__Scalar::schema> create__foreach_minimum__Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_minimum__Scalar::name, _foreach_minimum__Scalar::overload_name).typed<_foreach_minimum__Scalar::schema>();
}

// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_minimum__Scalar::call(at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_minimum__Scalar_typed_handle();
  return handle.call(self, scalar);
}

// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
void _foreach_minimum__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
  static auto handle = create__foreach_minimum__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalar);
}

// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_List::schema> create__foreach_minimum_List_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_minimum_List::name, _foreach_minimum_List::overload_name).typed<_foreach_minimum_List::schema>();
}

// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_List::call(at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_minimum_List_typed_handle();
  return handle.call(self, other);
}

// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_minimum_List_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum__List::schema> create__foreach_minimum__List_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_minimum__List::name, _foreach_minimum__List::overload_name).typed<_foreach_minimum__List::schema>();
}

// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_minimum__List::call(at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_minimum__List_typed_handle();
  return handle.call(self, other);
}

// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
void _foreach_minimum__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
  static auto handle = create__foreach_minimum__List_typed_handle();
  return handle.redispatch(dispatchKeySet, self, other);
}

// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_ScalarList::schema> create__foreach_minimum_ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_minimum_ScalarList::name, _foreach_minimum_ScalarList::overload_name).typed<_foreach_minimum_ScalarList::schema>();
}

// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_minimum_ScalarList_typed_handle();
  return handle.call(self, scalars);
}

// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_minimum_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_minimum_ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalars);
}

// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum__ScalarList::schema> create__foreach_minimum__ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_minimum__ScalarList::name, _foreach_minimum__ScalarList::overload_name).typed<_foreach_minimum__ScalarList::schema>();
}

// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_minimum__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_minimum__ScalarList_typed_handle();
  return handle.call(self, scalars);
}

// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
void _foreach_minimum__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_minimum__ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, scalars);
}

// ---------------------------------------------------------------------------
// Dispatcher stubs for all aten::_foreach_addcdiv overloads
// (Scalar / ScalarList / Tensor, out-of-place and in-place).  Pattern: a
// NOINLINE factory resolves a typed handle via the c10::Dispatcher; `call`
// dispatches normally; `redispatch` dispatches with an explicit
// DispatchKeySet.  Handles are cached in function-local statics.
// ---------------------------------------------------------------------------

// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Scalar::schema> create__foreach_addcdiv_Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_addcdiv_Scalar::name, _foreach_addcdiv_Scalar::overload_name).typed<_foreach_addcdiv_Scalar::schema>();
}

// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  static auto handle = create__foreach_addcdiv_Scalar_typed_handle();
  return handle.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  static auto handle = create__foreach_addcdiv_Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_ScalarList::schema> create__foreach_addcdiv_ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_addcdiv_ScalarList::name, _foreach_addcdiv_ScalarList::overload_name).typed<_foreach_addcdiv_ScalarList::schema>();
}

// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_addcdiv_ScalarList_typed_handle();
  return handle.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_addcdiv_ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Tensor::schema> create__foreach_addcdiv_Tensor_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_addcdiv_Tensor::name, _foreach_addcdiv_Tensor::overload_name).typed<_foreach_addcdiv_Tensor::schema>();
}

// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  static auto handle = create__foreach_addcdiv_Tensor_typed_handle();
  return handle.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcdiv_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  static auto handle = create__foreach_addcdiv_Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv__Scalar::schema> create__foreach_addcdiv__Scalar_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_addcdiv__Scalar::name, _foreach_addcdiv__Scalar::overload_name).typed<_foreach_addcdiv__Scalar::schema>();
}

// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
void _foreach_addcdiv__Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  static auto handle = create__foreach_addcdiv__Scalar_typed_handle();
  return handle.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
void _foreach_addcdiv__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
  static auto handle = create__foreach_addcdiv__Scalar_typed_handle();
  return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv__ScalarList::schema> create__foreach_addcdiv__ScalarList_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_addcdiv__ScalarList::name, _foreach_addcdiv__ScalarList::overload_name).typed<_foreach_addcdiv__ScalarList::schema>();
}

// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
void _foreach_addcdiv__ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_addcdiv__ScalarList_typed_handle();
  return handle.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
void _foreach_addcdiv__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
  static auto handle = create__foreach_addcdiv__ScalarList_typed_handle();
  return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv__Tensor::schema> create__foreach_addcdiv__Tensor_typed_handle() {
  auto& registry = c10::Dispatcher::singleton();
  return registry.findSchemaOrThrow(_foreach_addcdiv__Tensor::name, _foreach_addcdiv__Tensor::overload_name).typed<_foreach_addcdiv__Tensor::schema>();
}

// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
void _foreach_addcdiv__Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  static auto handle = create__foreach_addcdiv__Tensor_typed_handle();
  return handle.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
void _foreach_addcdiv__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
  static auto handle = create__foreach_addcdiv__Tensor_typed_handle();
  return handle.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Scalar::schema> create__foreach_addcmul_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Scalar::name, _foreach_addcmul_Scalar::overload_name)
      .typed<_foreach_addcmul_Scalar::schema>();
}

// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    static auto op = create__foreach_addcmul_Scalar_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    static auto op = create__foreach_addcmul_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_ScalarList::schema> create__foreach_addcmul_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_ScalarList::name, _foreach_addcmul_ScalarList::overload_name)
      .typed<_foreach_addcmul_ScalarList::schema>();
}

// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_addcmul_ScalarList_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
::std::vector<at::Tensor> _foreach_addcmul_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_addcmul_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
// One-time dispatcher schema lookup for this overload; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Tensor::schema> create__foreach_addcmul_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Tensor::name, _foreach_addcmul_Tensor::overload_name)
      .typed<_foreach_addcmul_Tensor::schema>();
}

// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_addcmul_Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    static auto op = create__foreach_addcmul_Tensor_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_addcmul_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    static auto op = create__foreach_addcmul_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
// One-time dispatcher schema lookup for this in-place overload; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul__Scalar::schema> create__foreach_addcmul__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul__Scalar::name, _foreach_addcmul__Scalar::overload_name)
      .typed<_foreach_addcmul__Scalar::schema>();
}

// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_addcmul__Scalar::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    static auto op = create__foreach_addcmul__Scalar_typed_handle();
    return op.call(self, tensor1, tensor2, value);
}

// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_addcmul__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
    
    static auto op = create__foreach_addcmul__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value);
}

// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
// One-time dispatcher schema lookup for this in-place overload; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul__ScalarList::schema> create__foreach_addcmul__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul__ScalarList::name, _foreach_addcmul__ScalarList::overload_name)
      .typed<_foreach_addcmul__ScalarList::schema>();
}

// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_addcmul__ScalarList::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_addcmul__ScalarList_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_addcmul__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
    
    static auto op = create__foreach_addcmul__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
// One-time dispatcher schema lookup for this in-place overload; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul__Tensor::schema> create__foreach_addcmul__Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul__Tensor::name, _foreach_addcmul__Tensor::overload_name)
      .typed<_foreach_addcmul__Tensor::schema>();
}

// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_addcmul__Tensor::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    static auto op = create__foreach_addcmul__Tensor_typed_handle();
    return op.call(self, tensor1, tensor2, scalars);
}

// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_addcmul__Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
    
    static auto op = create__foreach_addcmul__Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
}

// aten::_foreach_abs(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_abs; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_abs::schema> create__foreach_abs_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_abs::name, _foreach_abs::overload_name)
      .typed<_foreach_abs::schema>();
}

// aten::_foreach_abs(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_abs::call(at::TensorList self) {
    
    static auto op = create__foreach_abs_typed_handle();
    return op.call(self);
}

// aten::_foreach_abs(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_abs::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_abs_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_abs_::schema> create__foreach_abs__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_abs_::name, _foreach_abs_::overload_name)
      .typed<_foreach_abs_::schema>();
}

// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_abs_::call(at::TensorList self) {
    
    static auto op = create__foreach_abs__typed_handle();
    return op.call(self);
}

// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_abs_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_abs__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_acos(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_acos; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_acos::schema> create__foreach_acos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_acos::name, _foreach_acos::overload_name)
      .typed<_foreach_acos::schema>();
}

// aten::_foreach_acos(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_acos::call(at::TensorList self) {
    
    static auto op = create__foreach_acos_typed_handle();
    return op.call(self);
}

// aten::_foreach_acos(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_acos::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_acos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_acos_::schema> create__foreach_acos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_acos_::name, _foreach_acos_::overload_name)
      .typed<_foreach_acos_::schema>();
}

// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_acos_::call(at::TensorList self) {
    
    static auto op = create__foreach_acos__typed_handle();
    return op.call(self);
}

// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_acos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_acos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_asin(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_asin; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_asin::schema> create__foreach_asin_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_asin::name, _foreach_asin::overload_name)
      .typed<_foreach_asin::schema>();
}

// aten::_foreach_asin(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_asin::call(at::TensorList self) {
    
    static auto op = create__foreach_asin_typed_handle();
    return op.call(self);
}

// aten::_foreach_asin(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_asin::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_asin_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_asin_::schema> create__foreach_asin__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_asin_::name, _foreach_asin_::overload_name)
      .typed<_foreach_asin_::schema>();
}

// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_asin_::call(at::TensorList self) {
    
    static auto op = create__foreach_asin__typed_handle();
    return op.call(self);
}

// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_asin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_asin__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_atan(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_atan; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_atan::schema> create__foreach_atan_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_atan::name, _foreach_atan::overload_name)
      .typed<_foreach_atan::schema>();
}

// aten::_foreach_atan(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_atan::call(at::TensorList self) {
    
    static auto op = create__foreach_atan_typed_handle();
    return op.call(self);
}

// aten::_foreach_atan(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_atan::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_atan_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_atan_::schema> create__foreach_atan__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_atan_::name, _foreach_atan_::overload_name)
      .typed<_foreach_atan_::schema>();
}

// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_atan_::call(at::TensorList self) {
    
    static auto op = create__foreach_atan__typed_handle();
    return op.call(self);
}

// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_atan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_atan__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_ceil; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_ceil::schema> create__foreach_ceil_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_ceil::name, _foreach_ceil::overload_name)
      .typed<_foreach_ceil::schema>();
}

// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_ceil::call(at::TensorList self) {
    
    static auto op = create__foreach_ceil_typed_handle();
    return op.call(self);
}

// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_ceil::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_ceil_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_ceil_::schema> create__foreach_ceil__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_ceil_::name, _foreach_ceil_::overload_name)
      .typed<_foreach_ceil_::schema>();
}

// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_ceil_::call(at::TensorList self) {
    
    static auto op = create__foreach_ceil__typed_handle();
    return op.call(self);
}

// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_ceil_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_ceil__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_cos(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_cos; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cos::schema> create__foreach_cos_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cos::name, _foreach_cos::overload_name)
      .typed<_foreach_cos::schema>();
}

// aten::_foreach_cos(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_cos::call(at::TensorList self) {
    
    static auto op = create__foreach_cos_typed_handle();
    return op.call(self);
}

// aten::_foreach_cos(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_cos::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_cos_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cos_::schema> create__foreach_cos__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cos_::name, _foreach_cos_::overload_name)
      .typed<_foreach_cos_::schema>();
}

// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_cos_::call(at::TensorList self) {
    
    static auto op = create__foreach_cos__typed_handle();
    return op.call(self);
}

// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_cos_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_cos__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_cosh; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cosh::schema> create__foreach_cosh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cosh::name, _foreach_cosh::overload_name)
      .typed<_foreach_cosh::schema>();
}

// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_cosh::call(at::TensorList self) {
    
    static auto op = create__foreach_cosh_typed_handle();
    return op.call(self);
}

// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_cosh::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_cosh_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cosh_::schema> create__foreach_cosh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cosh_::name, _foreach_cosh_::overload_name)
      .typed<_foreach_cosh_::schema>();
}

// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_cosh_::call(at::TensorList self) {
    
    static auto op = create__foreach_cosh__typed_handle();
    return op.call(self);
}

// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_cosh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_cosh__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_erf(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_erf; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erf::schema> create__foreach_erf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erf::name, _foreach_erf::overload_name)
      .typed<_foreach_erf::schema>();
}

// aten::_foreach_erf(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_erf::call(at::TensorList self) {
    
    static auto op = create__foreach_erf_typed_handle();
    return op.call(self);
}

// aten::_foreach_erf(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_erf::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_erf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erf_::schema> create__foreach_erf__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erf_::name, _foreach_erf_::overload_name)
      .typed<_foreach_erf_::schema>();
}

// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_erf_::call(at::TensorList self) {
    
    static auto op = create__foreach_erf__typed_handle();
    return op.call(self);
}

// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_erf_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_erf__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_erfc; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erfc::schema> create__foreach_erfc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erfc::name, _foreach_erfc::overload_name)
      .typed<_foreach_erfc::schema>();
}

// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_erfc::call(at::TensorList self) {
    
    static auto op = create__foreach_erfc_typed_handle();
    return op.call(self);
}

// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_erfc::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_erfc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erfc_::schema> create__foreach_erfc__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erfc_::name, _foreach_erfc_::overload_name)
      .typed<_foreach_erfc_::schema>();
}

// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_erfc_::call(at::TensorList self) {
    
    static auto op = create__foreach_erfc__typed_handle();
    return op.call(self);
}

// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_erfc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_erfc__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_exp(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_exp; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_exp::schema> create__foreach_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_exp::name, _foreach_exp::overload_name)
      .typed<_foreach_exp::schema>();
}

// aten::_foreach_exp(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_exp::call(at::TensorList self) {
    
    static auto op = create__foreach_exp_typed_handle();
    return op.call(self);
}

// aten::_foreach_exp(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_exp::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_exp_::schema> create__foreach_exp__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_exp_::name, _foreach_exp_::overload_name)
      .typed<_foreach_exp_::schema>();
}

// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_exp_::call(at::TensorList self) {
    
    static auto op = create__foreach_exp__typed_handle();
    return op.call(self);
}

// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_exp_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_exp__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_expm1; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_expm1::schema> create__foreach_expm1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_expm1::name, _foreach_expm1::overload_name)
      .typed<_foreach_expm1::schema>();
}

// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_expm1::call(at::TensorList self) {
    
    static auto op = create__foreach_expm1_typed_handle();
    return op.call(self);
}

// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_expm1::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_expm1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_expm1_::schema> create__foreach_expm1__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_expm1_::name, _foreach_expm1_::overload_name)
      .typed<_foreach_expm1_::schema>();
}

// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_expm1_::call(at::TensorList self) {
    
    static auto op = create__foreach_expm1__typed_handle();
    return op.call(self);
}

// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_expm1_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_expm1__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_floor(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_floor; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_floor::schema> create__foreach_floor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_floor::name, _foreach_floor::overload_name)
      .typed<_foreach_floor::schema>();
}

// aten::_foreach_floor(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_floor::call(at::TensorList self) {
    
    static auto op = create__foreach_floor_typed_handle();
    return op.call(self);
}

// aten::_foreach_floor(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_floor::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_floor_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_floor_::schema> create__foreach_floor__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_floor_::name, _foreach_floor_::overload_name)
      .typed<_foreach_floor_::schema>();
}

// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_floor_::call(at::TensorList self) {
    
    static auto op = create__foreach_floor__typed_handle();
    return op.call(self);
}

// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_floor_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_floor__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_frac(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_frac; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_frac::schema> create__foreach_frac_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_frac::name, _foreach_frac::overload_name)
      .typed<_foreach_frac::schema>();
}

// aten::_foreach_frac(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_frac::call(at::TensorList self) {
    
    static auto op = create__foreach_frac_typed_handle();
    return op.call(self);
}

// aten::_foreach_frac(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_frac::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_frac_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
// One-time dispatcher schema lookup for the in-place variant; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_frac_::schema> create__foreach_frac__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_frac_::name, _foreach_frac_::overload_name)
      .typed<_foreach_frac_::schema>();
}

// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_frac_::call(at::TensorList self) {
    
    static auto op = create__foreach_frac__typed_handle();
    return op.call(self);
}

// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_frac_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_frac__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
// One-time dispatcher schema lookup for this overload; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_List::schema> create__foreach_lerp_List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_List::name, _foreach_lerp_List::overload_name)
      .typed<_foreach_lerp_List::schema>();
}

// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_lerp_List::call(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
    
    static auto op = create__foreach_lerp_List_typed_handle();
    return op.call(self, tensors1, weights);
}

// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_lerp_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
    
    static auto op = create__foreach_lerp_List_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weights);
}

// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
// One-time dispatcher schema lookup for this in-place overload; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp__List::schema> create__foreach_lerp__List_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp__List::name, _foreach_lerp__List::overload_name)
      .typed<_foreach_lerp__List::schema>();
}

// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_lerp__List::call(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
    
    static auto op = create__foreach_lerp__List_typed_handle();
    return op.call(self, tensors1, weights);
}

// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_lerp__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
    
    static auto op = create__foreach_lerp__List_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weights);
}

// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
// One-time dispatcher schema lookup for this overload; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_Scalar::schema> create__foreach_lerp_Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_Scalar::name, _foreach_lerp_Scalar::overload_name)
      .typed<_foreach_lerp_Scalar::schema>();
}

// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_lerp_Scalar::call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
    
    static auto op = create__foreach_lerp_Scalar_typed_handle();
    return op.call(self, tensors1, weight);
}

// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_lerp_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
    
    static auto op = create__foreach_lerp_Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight);
}

// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
// One-time dispatcher schema lookup for this in-place overload; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp__Scalar::schema> create__foreach_lerp__Scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp__Scalar::name, _foreach_lerp__Scalar::overload_name)
      .typed<_foreach_lerp__Scalar::schema>();
}

// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_lerp__Scalar::call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
    
    static auto op = create__foreach_lerp__Scalar_typed_handle();
    return op.call(self, tensors1, weight);
}

// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_lerp__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
    
    static auto op = create__foreach_lerp__Scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight);
}

// aten::_foreach_lerp.ScalarList(Tensor[] self, Tensor[] tensors1, Scalar[] weight) -> Tensor[]
// One-time dispatcher schema lookup for this overload; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_ScalarList::schema> create__foreach_lerp_ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_ScalarList::name, _foreach_lerp_ScalarList::overload_name)
      .typed<_foreach_lerp_ScalarList::schema>();
}

// aten::_foreach_lerp.ScalarList(Tensor[] self, Tensor[] tensors1, Scalar[] weight) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_lerp_ScalarList::call(at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
    
    static auto op = create__foreach_lerp_ScalarList_typed_handle();
    return op.call(self, tensors1, weight);
}

// aten::_foreach_lerp.ScalarList(Tensor[] self, Tensor[] tensors1, Scalar[] weight) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_lerp_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
    
    static auto op = create__foreach_lerp_ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight);
}

// aten::_foreach_lerp_.ScalarList(Tensor(a!)[] self, Tensor[] tensors1, Scalar[] weight) -> ()
// One-time dispatcher schema lookup for this in-place overload; C10_NOINLINE
// keeps the cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp__ScalarList::schema> create__foreach_lerp__ScalarList_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp__ScalarList::name, _foreach_lerp__ScalarList::overload_name)
      .typed<_foreach_lerp__ScalarList::schema>();
}

// aten::_foreach_lerp_.ScalarList(Tensor(a!)[] self, Tensor[] tensors1, Scalar[] weight) -> ()
// Dispatches through the typed handle cached in a function-local static.
void _foreach_lerp__ScalarList::call(at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
    
    static auto op = create__foreach_lerp__ScalarList_typed_handle();
    return op.call(self, tensors1, weight);
}

// aten::_foreach_lerp_.ScalarList(Tensor(a!)[] self, Tensor[] tensors1, Scalar[] weight) -> ()
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
void _foreach_lerp__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
    
    static auto op = create__foreach_lerp__ScalarList_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight);
}

// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
// One-time dispatcher schema lookup for _foreach_lgamma; C10_NOINLINE keeps the
// cold initialization path out of the hot wrappers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lgamma::schema> create__foreach_lgamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lgamma::name, _foreach_lgamma::overload_name)
      .typed<_foreach_lgamma::schema>();
}

// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
// Dispatches through the typed handle cached in a function-local static.
::std::vector<at::Tensor> _foreach_lgamma::call(at::TensorList self) {
    
    static auto op = create__foreach_lgamma_typed_handle();
    return op.call(self);
}

// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
// As call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::vector<at::Tensor> _foreach_lgamma::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_lgamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lgamma_::schema> create__foreach_lgamma__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lgamma_::name, _foreach_lgamma_::overload_name)
      .typed<_foreach_lgamma_::schema>();
}

// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
void _foreach_lgamma_::call(at::TensorList self) {
    
    static auto op = create__foreach_lgamma__typed_handle();
    return op.call(self);
}

// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
void _foreach_lgamma_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    static auto op = create__foreach_lgamma__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log::schema> create__foreach_log_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log::name, _foreach_log::overload_name).typed<_foreach_log::schema>();
}

// aten::_foreach_log(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log::call(at::TensorList self) {
    static const auto handle = create__foreach_log_typed_handle();
    return handle.call(self);
}

// aten::_foreach_log(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log_::schema> create__foreach_log__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log_::name, _foreach_log_::overload_name).typed<_foreach_log_::schema>();
}

// aten::_foreach_log_(Tensor(a!)[] self) -> ()
void _foreach_log_::call(at::TensorList self) {
    static const auto handle = create__foreach_log__typed_handle();
    return handle.call(self);
}

// aten::_foreach_log_(Tensor(a!)[] self) -> ()
void _foreach_log_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log10(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log10::schema> create__foreach_log10_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log10::name, _foreach_log10::overload_name).typed<_foreach_log10::schema>();
}

// aten::_foreach_log10(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log10::call(at::TensorList self) {
    static const auto handle = create__foreach_log10_typed_handle();
    return handle.call(self);
}

// aten::_foreach_log10(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log10::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log10_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log10_::schema> create__foreach_log10__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log10_::name, _foreach_log10_::overload_name).typed<_foreach_log10_::schema>();
}

// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
void _foreach_log10_::call(at::TensorList self) {
    static const auto handle = create__foreach_log10__typed_handle();
    return handle.call(self);
}

// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
void _foreach_log10_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log10__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log1p::schema> create__foreach_log1p_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log1p::name, _foreach_log1p::overload_name).typed<_foreach_log1p::schema>();
}

// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log1p::call(at::TensorList self) {
    static const auto handle = create__foreach_log1p_typed_handle();
    return handle.call(self);
}

// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log1p::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log1p_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log1p_::schema> create__foreach_log1p__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log1p_::name, _foreach_log1p_::overload_name).typed<_foreach_log1p_::schema>();
}

// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
void _foreach_log1p_::call(at::TensorList self) {
    static const auto handle = create__foreach_log1p__typed_handle();
    return handle.call(self);
}

// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
void _foreach_log1p_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log1p__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log2(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log2::schema> create__foreach_log2_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log2::name, _foreach_log2::overload_name).typed<_foreach_log2::schema>();
}

// aten::_foreach_log2(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log2::call(at::TensorList self) {
    static const auto handle = create__foreach_log2_typed_handle();
    return handle.call(self);
}

// aten::_foreach_log2(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_log2::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log2_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log2_::schema> create__foreach_log2__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_log2_::name, _foreach_log2_::overload_name).typed<_foreach_log2_::schema>();
}

// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
void _foreach_log2_::call(at::TensorList self) {
    static const auto handle = create__foreach_log2__typed_handle();
    return handle.call(self);
}

// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
void _foreach_log2_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_log2__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_max(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_max::schema> create__foreach_max_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_max::name, _foreach_max::overload_name).typed<_foreach_max::schema>();
}

// aten::_foreach_max(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_max::call(at::TensorList self) {
    static const auto handle = create__foreach_max_typed_handle();
    return handle.call(self);
}

// aten::_foreach_max(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_max::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_max_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_neg(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_neg::schema> create__foreach_neg_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_neg::name, _foreach_neg::overload_name).typed<_foreach_neg::schema>();
}

// aten::_foreach_neg(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_neg::call(at::TensorList self) {
    static const auto handle = create__foreach_neg_typed_handle();
    return handle.call(self);
}

// aten::_foreach_neg(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_neg::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_neg_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_neg_::schema> create__foreach_neg__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_neg_::name, _foreach_neg_::overload_name).typed<_foreach_neg_::schema>();
}

// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
void _foreach_neg_::call(at::TensorList self) {
    static const auto handle = create__foreach_neg__typed_handle();
    return handle.call(self);
}

// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
void _foreach_neg_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_neg__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_norm_Scalar::schema> create__foreach_norm_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_norm_Scalar::name, _foreach_norm_Scalar::overload_name).typed<_foreach_norm_Scalar::schema>();
}

// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
::std::vector<at::Tensor> _foreach_norm_Scalar::call(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype) {
    static const auto handle = create__foreach_norm_Scalar_typed_handle();
    return handle.call(self, ord, dtype);
}

// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2, ScalarType? dtype=None) -> Tensor[]
::std::vector<at::Tensor> _foreach_norm_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype) {
    static const auto handle = create__foreach_norm_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, ord, dtype);
}

// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_List::schema> create__foreach_pow_List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow_List::name, _foreach_pow_List::overload_name).typed<_foreach_pow_List::schema>();
}

// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_List::call(at::TensorList self, at::TensorList exponent) {
    static const auto handle = create__foreach_pow_List_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) {
    static const auto handle = create__foreach_pow_List_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_Scalar::schema> create__foreach_pow_Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow_Scalar::name, _foreach_pow_Scalar::overload_name).typed<_foreach_pow_Scalar::schema>();
}

// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_Scalar::call(at::TensorList self, const at::Scalar & exponent) {
    static const auto handle = create__foreach_pow_Scalar_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) {
    static const auto handle = create__foreach_pow_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_ScalarList::schema> create__foreach_pow_ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow_ScalarList::name, _foreach_pow_ScalarList::overload_name).typed<_foreach_pow_ScalarList::schema>();
}

// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    static const auto handle = create__foreach_pow_ScalarList_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    static const auto handle = create__foreach_pow_ScalarList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_ScalarAndTensor::schema> create__foreach_pow_ScalarAndTensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow_ScalarAndTensor::name, _foreach_pow_ScalarAndTensor::overload_name).typed<_foreach_pow_ScalarAndTensor::schema>();
}

// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor::call(const at::Scalar & self, at::TensorList exponent) {
    static const auto handle = create__foreach_pow_ScalarAndTensor_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, at::TensorList exponent) {
    static const auto handle = create__foreach_pow_ScalarAndTensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow__List::schema> create__foreach_pow__List_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow__List::name, _foreach_pow__List::overload_name).typed<_foreach_pow__List::schema>();
}

// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
void _foreach_pow__List::call(at::TensorList self, at::TensorList exponent) {
    static const auto handle = create__foreach_pow__List_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
void _foreach_pow__List::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) {
    static const auto handle = create__foreach_pow__List_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow__Scalar::schema> create__foreach_pow__Scalar_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow__Scalar::name, _foreach_pow__Scalar::overload_name).typed<_foreach_pow__Scalar::schema>();
}

// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
void _foreach_pow__Scalar::call(at::TensorList self, const at::Scalar & exponent) {
    static const auto handle = create__foreach_pow__Scalar_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
void _foreach_pow__Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) {
    static const auto handle = create__foreach_pow__Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow__ScalarList::schema> create__foreach_pow__ScalarList_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_pow__ScalarList::name, _foreach_pow__ScalarList::overload_name).typed<_foreach_pow__ScalarList::schema>();
}

// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
void _foreach_pow__ScalarList::call(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    static const auto handle = create__foreach_pow__ScalarList_typed_handle();
    return handle.call(self, exponent);
}

// aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
void _foreach_pow__ScalarList::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
    static const auto handle = create__foreach_pow__ScalarList_typed_handle();
    return handle.redispatch(dispatchKeySet, self, exponent);
}

// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_reciprocal::schema> create__foreach_reciprocal_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_reciprocal::name, _foreach_reciprocal::overload_name).typed<_foreach_reciprocal::schema>();
}

// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_reciprocal::call(at::TensorList self) {
    static const auto handle = create__foreach_reciprocal_typed_handle();
    return handle.call(self);
}

// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_reciprocal::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_reciprocal_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_reciprocal_::schema> create__foreach_reciprocal__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_reciprocal_::name, _foreach_reciprocal_::overload_name).typed<_foreach_reciprocal_::schema>();
}

// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
void _foreach_reciprocal_::call(at::TensorList self) {
    static const auto handle = create__foreach_reciprocal__typed_handle();
    return handle.call(self);
}

// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
void _foreach_reciprocal_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_reciprocal__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_round(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_round::schema> create__foreach_round_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_round::name, _foreach_round::overload_name).typed<_foreach_round::schema>();
}

// aten::_foreach_round(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_round::call(at::TensorList self) {
    static const auto handle = create__foreach_round_typed_handle();
    return handle.call(self);
}

// aten::_foreach_round(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_round::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_round_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_round_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_round_::schema> create__foreach_round__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_round_::name, _foreach_round_::overload_name).typed<_foreach_round_::schema>();
}

// aten::_foreach_round_(Tensor(a!)[] self) -> ()
void _foreach_round_::call(at::TensorList self) {
    static const auto handle = create__foreach_round__typed_handle();
    return handle.call(self);
}

// aten::_foreach_round_(Tensor(a!)[] self) -> ()
void _foreach_round_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_round__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_rsqrt(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_rsqrt::schema> create__foreach_rsqrt_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_rsqrt::name, _foreach_rsqrt::overload_name).typed<_foreach_rsqrt::schema>();
}

// aten::_foreach_rsqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_rsqrt::call(at::TensorList self) {
    static const auto handle = create__foreach_rsqrt_typed_handle();
    return handle.call(self);
}

// aten::_foreach_rsqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_rsqrt::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_rsqrt_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_rsqrt_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_rsqrt_::schema> create__foreach_rsqrt__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_rsqrt_::name, _foreach_rsqrt_::overload_name).typed<_foreach_rsqrt_::schema>();
}

// aten::_foreach_rsqrt_(Tensor(a!)[] self) -> ()
void _foreach_rsqrt_::call(at::TensorList self) {
    static const auto handle = create__foreach_rsqrt__typed_handle();
    return handle.call(self);
}

// aten::_foreach_rsqrt_(Tensor(a!)[] self) -> ()
void _foreach_rsqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_rsqrt__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sigmoid::schema> create__foreach_sigmoid_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sigmoid::name, _foreach_sigmoid::overload_name).typed<_foreach_sigmoid::schema>();
}

// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sigmoid::call(at::TensorList self) {
    static const auto handle = create__foreach_sigmoid_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sigmoid_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sigmoid_::schema> create__foreach_sigmoid__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sigmoid_::name, _foreach_sigmoid_::overload_name).typed<_foreach_sigmoid_::schema>();
}

// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
void _foreach_sigmoid_::call(at::TensorList self) {
    static const auto handle = create__foreach_sigmoid__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
void _foreach_sigmoid_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sigmoid__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sign(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sign::schema> create__foreach_sign_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sign::name, _foreach_sign::overload_name).typed<_foreach_sign::schema>();
}

// aten::_foreach_sign(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sign::call(at::TensorList self) {
    static const auto handle = create__foreach_sign_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sign(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sign::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sign_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sign_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sign_::schema> create__foreach_sign__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sign_::name, _foreach_sign_::overload_name).typed<_foreach_sign_::schema>();
}

// aten::_foreach_sign_(Tensor(a!)[] self) -> ()
void _foreach_sign_::call(at::TensorList self) {
    static const auto handle = create__foreach_sign__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sign_(Tensor(a!)[] self) -> ()
void _foreach_sign_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sign__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sin(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sin::schema> create__foreach_sin_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sin::name, _foreach_sin::overload_name).typed<_foreach_sin::schema>();
}

// aten::_foreach_sin(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sin::call(at::TensorList self) {
    static const auto handle = create__foreach_sin_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sin(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sin::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sin_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sin_::schema> create__foreach_sin__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sin_::name, _foreach_sin_::overload_name).typed<_foreach_sin_::schema>();
}

// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
void _foreach_sin_::call(at::TensorList self) {
    static const auto handle = create__foreach_sin__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
void _foreach_sin_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sin__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sinh::schema> create__foreach_sinh_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sinh::name, _foreach_sinh::overload_name).typed<_foreach_sinh::schema>();
}

// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sinh::call(at::TensorList self) {
    static const auto handle = create__foreach_sinh_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sinh::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sinh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sinh_::schema> create__foreach_sinh__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sinh_::name, _foreach_sinh_::overload_name).typed<_foreach_sinh_::schema>();
}

// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
void _foreach_sinh_::call(at::TensorList self) {
    static const auto handle = create__foreach_sinh__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
void _foreach_sinh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sinh__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sqrt::schema> create__foreach_sqrt_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sqrt::name, _foreach_sqrt::overload_name).typed<_foreach_sqrt::schema>();
}

// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sqrt::call(at::TensorList self) {
    static const auto handle = create__foreach_sqrt_typed_handle();
    return handle.call(self);
}

// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_sqrt::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sqrt_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sqrt_::schema> create__foreach_sqrt__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sqrt_::name, _foreach_sqrt_::overload_name).typed<_foreach_sqrt_::schema>();
}

// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
void _foreach_sqrt_::call(at::TensorList self) {
    static const auto handle = create__foreach_sqrt__typed_handle();
    return handle.call(self);
}

// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
void _foreach_sqrt_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_sqrt__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_tan(Tensor[] self) -> Tensor[]
// Resolves and caches the typed dispatcher handle for this schema.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tan::schema> create__foreach_tan_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_tan::name, _foreach_tan::overload_name).typed<_foreach_tan::schema>();
}

// aten::_foreach_tan(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tan::call(at::TensorList self) {
    static const auto handle = create__foreach_tan_typed_handle();
    return handle.call(self);
}

// aten::_foreach_tan(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tan::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_tan_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
// Resolves and caches the typed dispatcher handle for the in-place variant.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tan_::schema> create__foreach_tan__typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_tan_::name, _foreach_tan_::overload_name).typed<_foreach_tan_::schema>();
}

// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
void _foreach_tan_::call(at::TensorList self) {
    static const auto handle = create__foreach_tan__typed_handle();
    return handle.call(self);
}

// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
void _foreach_tan_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    static const auto handle = create__foreach_tan__typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tanh::schema> create__foreach_tanh_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_tanh::name, _foreach_tanh::overload_name)
      .typed<_foreach_tanh::schema>();
}

// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tanh::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_tanh_typed_handle();
    return handle.call(self);
}

// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_tanh::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_tanh_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tanh_::schema> create__foreach_tanh__typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_tanh_::name, _foreach_tanh_::overload_name)
      .typed<_foreach_tanh_::schema>();
}

// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
void _foreach_tanh_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_tanh__typed_handle();
    handle.call(self);
}

// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
void _foreach_tanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_tanh__typed_handle();
    handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_trunc::schema> create__foreach_trunc_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_trunc::name, _foreach_trunc::overload_name)
      .typed<_foreach_trunc::schema>();
}

// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_trunc::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_trunc_typed_handle();
    return handle.call(self);
}

// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
::std::vector<at::Tensor> _foreach_trunc::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_trunc_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_trunc_::schema> create__foreach_trunc__typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_trunc_::name, _foreach_trunc_::overload_name)
      .typed<_foreach_trunc_::schema>();
}

// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
void _foreach_trunc_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_trunc__typed_handle();
    handle.call(self);
}

// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
void _foreach_trunc_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_trunc__typed_handle();
    handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_zero_::schema> create__foreach_zero__typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_zero_::name, _foreach_zero_::overload_name)
      .typed<_foreach_zero_::schema>();
}

// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
void _foreach_zero_::call(at::TensorList self) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_zero__typed_handle();
    handle.call(self);
}

// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
void _foreach_zero_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_zero__typed_handle();
    handle.redispatch(dispatchKeySet, self);
}

// aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_copy_::schema> create__foreach_copy__typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_copy_::name, _foreach_copy_::overload_name)
      .typed<_foreach_copy_::schema>();
}

// aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()
void _foreach_copy_::call(at::TensorList self, at::TensorList src, bool non_blocking) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_copy__typed_handle();
    handle.call(self, src, non_blocking);
}

// aten::_foreach_copy_(Tensor(a!)[] self, Tensor[] src, bool non_blocking=False) -> ()
void _foreach_copy_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_copy__typed_handle();
    handle.redispatch(dispatchKeySet, self, src, non_blocking);
}

// aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_copy::schema> create__foreach_copy_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_copy::name, _foreach_copy::overload_name)
      .typed<_foreach_copy::schema>();
}

// aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out
::std::vector<at::Tensor> _foreach_copy::call(at::TensorList self, at::TensorList src, bool non_blocking) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__foreach_copy_typed_handle();
    return handle.call(self, src, non_blocking);
}

// aten::_foreach_copy(Tensor[] self, Tensor[] src, bool non_blocking=False) -> Tensor[] self_out
::std::vector<at::Tensor> _foreach_copy::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__foreach_copy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, src, non_blocking);
}

// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Tensor::schema> create_bucketize_Tensor_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bucketize_Tensor::name, bucketize_Tensor::overload_name)
      .typed<bucketize_Tensor::schema>();
}

// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Tensor::call(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_bucketize_Tensor_typed_handle();
    return handle.call(self, boundaries, out_int32, right);
}

// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_bucketize_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, boundaries, out_int32, right);
}

// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Tensor_out::schema> create_bucketize_Tensor_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bucketize_Tensor_out::name, bucketize_Tensor_out::overload_name)
      .typed<bucketize_Tensor_out::schema>();
}

// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bucketize_Tensor_out::call(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_bucketize_Tensor_out_typed_handle();
    return handle.call(self, boundaries, out_int32, right, out);
}

// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bucketize_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_bucketize_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
}

// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Scalar::schema> create_bucketize_Scalar_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bucketize_Scalar::name, bucketize_Scalar::overload_name)
      .typed<bucketize_Scalar::schema>();
}

// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Scalar::call(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_bucketize_Scalar_typed_handle();
    return handle.call(self, boundaries, out_int32, right);
}

// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
at::Tensor bucketize_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_bucketize_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, boundaries, out_int32, right);
}

// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Tensor::schema> create_searchsorted_Tensor_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(searchsorted_Tensor::name, searchsorted_Tensor::overload_name)
      .typed<searchsorted_Tensor::schema>();
}

// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Tensor::call(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_searchsorted_Tensor_typed_handle();
    return handle.call(sorted_sequence, self, out_int32, right, side, sorter);
}

// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_searchsorted_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
}

// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Tensor_out::schema> create_searchsorted_Tensor_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(searchsorted_Tensor_out::name, searchsorted_Tensor_out::overload_name)
      .typed<searchsorted_Tensor_out::schema>();
}

// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Tensor_out::call(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_searchsorted_Tensor_out_typed_handle();
    return handle.call(sorted_sequence, self, out_int32, right, side, sorter, out);
}

// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_searchsorted_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
}

// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Scalar::schema> create_searchsorted_Scalar_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(searchsorted_Scalar::name, searchsorted_Scalar::overload_name)
      .typed<searchsorted_Scalar::schema>();
}

// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Scalar::call(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_searchsorted_Scalar_typed_handle();
    return handle.call(sorted_sequence, self, out_int32, right, side, sorter);
}

// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
at::Tensor searchsorted_Scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_searchsorted_Scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
}

// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<searchsorted_Scalar_out::schema> create_searchsorted_Scalar_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(searchsorted_Scalar_out::name, searchsorted_Scalar_out::overload_name)
      .typed<searchsorted_Scalar_out::schema>();
}

// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Scalar_out::call(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_searchsorted_Scalar_out_typed_handle();
    return handle.call(sorted_sequence, self, out_int32, right, side, sorter, out);
}

// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & searchsorted_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_searchsorted_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
}

// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_coo_to_csr::schema> create__convert_indices_from_coo_to_csr_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_indices_from_coo_to_csr::name, _convert_indices_from_coo_to_csr::overload_name)
      .typed<_convert_indices_from_coo_to_csr::schema>();
}

// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
at::Tensor _convert_indices_from_coo_to_csr::call(const at::Tensor & self, int64_t size, bool out_int32) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__convert_indices_from_coo_to_csr_typed_handle();
    return handle.call(self, size, out_int32);
}

// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
at::Tensor _convert_indices_from_coo_to_csr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__convert_indices_from_coo_to_csr_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, out_int32);
}

// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_coo_to_csr_out::schema> create__convert_indices_from_coo_to_csr_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_indices_from_coo_to_csr_out::name, _convert_indices_from_coo_to_csr_out::overload_name)
      .typed<_convert_indices_from_coo_to_csr_out::schema>();
}

// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_coo_to_csr_out::call(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__convert_indices_from_coo_to_csr_out_typed_handle();
    return handle.call(self, size, out_int32, out);
}

// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_coo_to_csr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__convert_indices_from_coo_to_csr_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, size, out_int32, out);
}

// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_csr_to_coo::schema> create__convert_indices_from_csr_to_coo_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_indices_from_csr_to_coo::name, _convert_indices_from_csr_to_coo::overload_name)
      .typed<_convert_indices_from_csr_to_coo::schema>();
}

// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
at::Tensor _convert_indices_from_csr_to_coo::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__convert_indices_from_csr_to_coo_typed_handle();
    return handle.call(crow_indices, col_indices, out_int32, transpose);
}

// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
at::Tensor _convert_indices_from_csr_to_coo::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__convert_indices_from_csr_to_coo_typed_handle();
    return handle.redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose);
}

// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_convert_indices_from_csr_to_coo_out::schema> create__convert_indices_from_csr_to_coo_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_convert_indices_from_csr_to_coo_out::name, _convert_indices_from_csr_to_coo_out::overload_name)
      .typed<_convert_indices_from_csr_to_coo_out::schema>();
}

// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_csr_to_coo_out::call(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create__convert_indices_from_csr_to_coo_out_typed_handle();
    return handle.call(crow_indices, col_indices, out_int32, transpose, out);
}

// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _convert_indices_from_csr_to_coo_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create__convert_indices_from_csr_to_coo_out_typed_handle();
    return handle.redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose, out);
}

// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss_out::schema> create_mse_loss_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mse_loss_out::name, mse_loss_out::overload_name)
      .typed<mse_loss_out::schema>();
}

// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mse_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_mse_loss_out_typed_handle();
    return handle.call(self, target, reduction, out);
}

// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mse_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_mse_loss_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction, out);
}

// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss::schema> create_mse_loss_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mse_loss::name, mse_loss::overload_name)
      .typed<mse_loss::schema>();
}

// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor mse_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_mse_loss_typed_handle();
    return handle.call(self, target, reduction);
}

// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor mse_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_mse_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss_backward_grad_input::schema> create_mse_loss_backward_grad_input_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mse_loss_backward_grad_input::name, mse_loss_backward_grad_input::overload_name)
      .typed<mse_loss_backward_grad_input::schema>();
}

// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & mse_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_mse_loss_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, target, reduction, grad_input);
}

// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & mse_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_mse_loss_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
}

// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mse_loss_backward::schema> create_mse_loss_backward_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mse_loss_backward::name, mse_loss_backward::overload_name)
      .typed<mse_loss_backward::schema>();
}

// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor mse_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_mse_loss_backward_typed_handle();
    return handle.call(grad_output, self, target, reduction);
}

// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor mse_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_mse_loss_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, reduction);
}

// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<l1_loss::schema> create_l1_loss_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(l1_loss::name, l1_loss::overload_name)
      .typed<l1_loss::schema>();
}

// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor l1_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_l1_loss_typed_handle();
    return handle.call(self, target, reduction);
}

// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor l1_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_l1_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss_out::schema> create_multi_margin_loss_out_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multi_margin_loss_out::name, multi_margin_loss_out::overload_name)
      .typed<multi_margin_loss_out::schema>();
}

// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multi_margin_loss_out::call(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_multi_margin_loss_out_typed_handle();
    return handle.call(self, target, p, margin, weight, reduction, out);
}

// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & multi_margin_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_multi_margin_loss_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, p, margin, weight, reduction, out);
}

// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss::schema> create_multi_margin_loss_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multi_margin_loss::name, multi_margin_loss::overload_name)
      .typed<multi_margin_loss::schema>();
}

// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor multi_margin_loss::call(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_multi_margin_loss_typed_handle();
    return handle.call(self, target, p, margin, weight, reduction);
}

// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
at::Tensor multi_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_multi_margin_loss_typed_handle();
    return handle.redispatch(dispatchKeySet, self, target, p, margin, weight, reduction);
}

// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss_backward_grad_input::schema> create_multi_margin_loss_backward_grad_input_typed_handle() {
  // Resolve the operator schema once and return a typed handle for it.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(multi_margin_loss_backward_grad_input::name, multi_margin_loss_backward_grad_input::overload_name)
      .typed<multi_margin_loss_backward_grad_input::schema>();
}

// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & multi_margin_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    // Handle is resolved lazily on first use and cached for later calls.
    static auto handle = create_multi_margin_loss_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, target, p, margin, weight, reduction, grad_input);
}

// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & multi_margin_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
    // Re-enter the dispatcher with the caller-provided key set.
    static auto handle = create_multi_margin_loss_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction, grad_input);
}

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multi_margin_loss_backward::schema> create_multi_margin_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multi_margin_loss_backward::name, multi_margin_loss_backward::overload_name)
      .typed<multi_margin_loss_backward::schema>();
}

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor multi_margin_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    
    static auto op = create_multi_margin_loss_backward_typed_handle();
    return op.call(grad_output, self, target, p, margin, weight, reduction);
}

// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor multi_margin_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
    
    static auto op = create_multi_margin_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction);
}

// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_out::schema> create_multilabel_margin_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_out::name, multilabel_margin_loss_out::overload_name)
      .typed<multilabel_margin_loss_out::schema>();
}

// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & multilabel_margin_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    
    static auto op = create_multilabel_margin_loss_out_typed_handle();
    return op.call(self, target, reduction, out);
}

// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & multilabel_margin_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    
    static auto op = create_multilabel_margin_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, out);
}

// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss::schema> create_multilabel_margin_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss::name, multilabel_margin_loss::overload_name)
      .typed<multilabel_margin_loss::schema>();
}

// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor multilabel_margin_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    static auto op = create_multilabel_margin_loss_typed_handle();
    return op.call(self, target, reduction);
}

// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor multilabel_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    static auto op = create_multilabel_margin_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_forward_output::schema> create_multilabel_margin_loss_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_forward_output::name, multilabel_margin_loss_forward_output::overload_name)
      .typed<multilabel_margin_loss_forward_output::schema>();
}

// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_output::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
    
    static auto op = create_multilabel_margin_loss_forward_output_typed_handle();
    return op.call(self, target, reduction, output, is_target);
}

// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
    
    static auto op = create_multilabel_margin_loss_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, output, is_target);
}

// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_forward::schema> create_multilabel_margin_loss_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_forward::name, multilabel_margin_loss_forward::overload_name)
      .typed<multilabel_margin_loss_forward::schema>();
}

// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    static auto op = create_multilabel_margin_loss_forward_typed_handle();
    return op.call(self, target, reduction);
}

// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    static auto op = create_multilabel_margin_loss_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_backward_grad_input::schema> create_multilabel_margin_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_backward_grad_input::name, multilabel_margin_loss_backward_grad_input::overload_name)
      .typed<multilabel_margin_loss_backward_grad_input::schema>();
}

// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & multilabel_margin_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
    
    static auto op = create_multilabel_margin_loss_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, reduction, is_target, grad_input);
}

// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & multilabel_margin_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
    
    static auto op = create_multilabel_margin_loss_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target, grad_input);
}

// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<multilabel_margin_loss_backward::schema> create_multilabel_margin_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(multilabel_margin_loss_backward::name, multilabel_margin_loss_backward::overload_name)
      .typed<multilabel_margin_loss_backward::schema>();
}

// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor multilabel_margin_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
    
    static auto op = create_multilabel_margin_loss_backward_typed_handle();
    return op.call(grad_output, self, target, reduction, is_target);
}

// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor multilabel_margin_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
    
    static auto op = create_multilabel_margin_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target);
}

// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_out::schema> create_nll_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_out::name, nll_loss_out::overload_name)
      .typed<nll_loss_out::schema>();
}

// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & nll_loss_out::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    
    static auto op = create_nll_loss_out_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, out);
}

// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & nll_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    
    static auto op = create_nll_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
}

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_nd::schema> create_nll_loss_nd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_nd::name, nll_loss_nd::overload_name)
      .typed<nll_loss_nd::schema>();
}

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor nll_loss_nd::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss_nd_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor nll_loss_nd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss_nd_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss::schema> create_nll_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss::name, nll_loss::overload_name)
      .typed<nll_loss::schema>();
}

// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor nll_loss::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor nll_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_forward_output::schema> create_nll_loss_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_forward_output::name, nll_loss_forward_output::overload_name)
      .typed<nll_loss_forward_output::schema>();
}

// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_output::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    
    static auto op = create_nll_loss_forward_output_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    
    static auto op = create_nll_loss_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_forward::schema> create_nll_loss_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_forward::name, nll_loss_forward::overload_name)
      .typed<nll_loss_forward::schema>();
}

// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss_forward_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_backward_grad_input::schema> create_nll_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_backward_grad_input::name, nll_loss_backward_grad_input::overload_name)
      .typed<nll_loss_backward_grad_input::schema>();
}

// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & nll_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    
    static auto op = create_nll_loss_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & nll_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    
    static auto op = create_nll_loss_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss_backward::schema> create_nll_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss_backward::name, nll_loss_backward::overload_name)
      .typed<nll_loss_backward::schema>();
}

// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor nll_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    
    static auto op = create_nll_loss_backward_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor nll_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    
    static auto op = create_nll_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_out::schema> create_nll_loss2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_out::name, nll_loss2d_out::overload_name)
      .typed<nll_loss2d_out::schema>();
}

// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & nll_loss2d_out::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    
    static auto op = create_nll_loss2d_out_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, out);
}

// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & nll_loss2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
    
    static auto op = create_nll_loss2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
}

// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d::schema> create_nll_loss2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d::name, nll_loss2d::overload_name)
      .typed<nll_loss2d::schema>();
}

// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor nll_loss2d::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss2d_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor nll_loss2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_forward_output::schema> create_nll_loss2d_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_forward_output::name, nll_loss2d_forward_output::overload_name)
      .typed<nll_loss2d_forward_output::schema>();
}

// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_output::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    
    static auto op = create_nll_loss2d_forward_output_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
    
    static auto op = create_nll_loss2d_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
}

// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_forward::schema> create_nll_loss2d_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_forward::name, nll_loss2d_forward::overload_name)
      .typed<nll_loss2d_forward::schema>();
}

// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss2d_forward_typed_handle();
    return op.call(self, target, weight, reduction, ignore_index);
}

// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
    
    static auto op = create_nll_loss2d_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
}

// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_backward_grad_input::schema> create_nll_loss2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_backward_grad_input::name, nll_loss2d_backward_grad_input::overload_name)
      .typed<nll_loss2d_backward_grad_input::schema>();
}

// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & nll_loss2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    
    static auto op = create_nll_loss2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & nll_loss2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
    
    static auto op = create_nll_loss2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
}

// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<nll_loss2d_backward::schema> create_nll_loss2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nll_loss2d_backward::name, nll_loss2d_backward::overload_name)
      .typed<nll_loss2d_backward::schema>();
}

// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor nll_loss2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    
    static auto op = create_nll_loss2d_backward_typed_handle();
    return op.call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor nll_loss2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
    
    static auto op = create_nll_loss2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight);
}

// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the Dispatcher singleton into a handle typed
// to the op's C++ signature; C10_NOINLINE keeps the cold one-time lookup out
// of callers.
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss_out::schema> create_smooth_l1_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss_out::name, smooth_l1_loss_out::overload_name)
      .typed<smooth_l1_loss_out::schema>();
}

// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
// Dispatcher entry point. The function-local static caches the typed handle,
// so the schema lookup runs only once (thread-safe magic-static init).
at::Tensor & smooth_l1_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
    
    static auto op = create_smooth_l1_loss_out_typed_handle();
    return op.call(self, target, reduction, beta, out);
}

// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the
// dispatcher (used when re-entering dispatch from inside a kernel).
at::Tensor & smooth_l1_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
    
    static auto op = create_smooth_l1_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, beta, out);
}

// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss::schema> create_smooth_l1_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss::name, smooth_l1_loss::overload_name)
      .typed<smooth_l1_loss::schema>();
}

// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
at::Tensor smooth_l1_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_smooth_l1_loss_typed_handle();
    return op.call(self, target, reduction, beta);
}

// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
at::Tensor smooth_l1_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_smooth_l1_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, beta);
}

// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss_backward_grad_input::schema> create_smooth_l1_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss_backward_grad_input::name, smooth_l1_loss_backward_grad_input::overload_name)
      .typed<smooth_l1_loss_backward_grad_input::schema>();
}

// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & smooth_l1_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_smooth_l1_loss_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, reduction, beta, grad_input);
}

// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & smooth_l1_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_smooth_l1_loss_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, beta, grad_input);
}

// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<smooth_l1_loss_backward::schema> create_smooth_l1_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(smooth_l1_loss_backward::name, smooth_l1_loss_backward::overload_name)
      .typed<smooth_l1_loss_backward::schema>();
}

// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
at::Tensor smooth_l1_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_smooth_l1_loss_backward_typed_handle();
    return op.call(grad_output, self, target, reduction, beta);
}

// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
at::Tensor smooth_l1_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_smooth_l1_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, beta);
}

// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss_out::schema> create_huber_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss_out::name, huber_loss_out::overload_name)
      .typed<huber_loss_out::schema>();
}

// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & huber_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_huber_loss_out_typed_handle();
    return op.call(self, target, reduction, delta, out);
}

// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & huber_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_huber_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, delta, out);
}

// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss::schema> create_huber_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss::name, huber_loss::overload_name)
      .typed<huber_loss::schema>();
}

// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
at::Tensor huber_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_huber_loss_typed_handle();
    return op.call(self, target, reduction, delta);
}

// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
at::Tensor huber_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_huber_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, delta);
}

// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss_backward_out::schema> create_huber_loss_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss_backward_out::name, huber_loss_backward_out::overload_name)
      .typed<huber_loss_backward_out::schema>();
}

// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & huber_loss_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_huber_loss_backward_out_typed_handle();
    return op.call(grad_output, self, target, reduction, delta, grad_input);
}

// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & huber_loss_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_huber_loss_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, delta, grad_input);
}

// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<huber_loss_backward::schema> create_huber_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(huber_loss_backward::name, huber_loss_backward::overload_name)
      .typed<huber_loss_backward::schema>();
}

// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
at::Tensor huber_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_huber_loss_backward_typed_handle();
    return op.call(grad_output, self, target, reduction, delta);
}

// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
at::Tensor huber_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_huber_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, delta);
}

// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss_out::schema> create_soft_margin_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(soft_margin_loss_out::name, soft_margin_loss_out::overload_name)
      .typed<soft_margin_loss_out::schema>();
}

// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & soft_margin_loss_out::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_soft_margin_loss_out_typed_handle();
    return op.call(self, target, reduction, out);
}

// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & soft_margin_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_soft_margin_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction, out);
}

// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss::schema> create_soft_margin_loss_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(soft_margin_loss::name, soft_margin_loss::overload_name)
      .typed<soft_margin_loss::schema>();
}

// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor soft_margin_loss::call(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_soft_margin_loss_typed_handle();
    return op.call(self, target, reduction);
}

// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
at::Tensor soft_margin_loss::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_soft_margin_loss_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, reduction);
}

// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss_backward_grad_input::schema> create_soft_margin_loss_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(soft_margin_loss_backward_grad_input::name, soft_margin_loss_backward_grad_input::overload_name)
      .typed<soft_margin_loss_backward_grad_input::schema>();
}

// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & soft_margin_loss_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_soft_margin_loss_backward_grad_input_typed_handle();
    return op.call(grad_output, self, target, reduction, grad_input);
}

// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & soft_margin_loss_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_soft_margin_loss_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
}

// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<soft_margin_loss_backward::schema> create_soft_margin_loss_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(soft_margin_loss_backward::name, soft_margin_loss_backward::overload_name)
      .typed<soft_margin_loss_backward::schema>();
}

// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor soft_margin_loss_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_soft_margin_loss_backward_typed_handle();
    return op.call(grad_output, self, target, reduction);
}

// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
at::Tensor soft_margin_loss_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_soft_margin_loss_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, target, reduction);
}

// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<elu_out::schema> create_elu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_out::name, elu_out::overload_name)
      .typed<elu_out::schema>();
}

// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & elu_out::call(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_elu_out_typed_handle();
    return op.call(self, alpha, scale, input_scale, out);
}

// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & elu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_elu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, scale, input_scale, out);
}

// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<elu::schema> create_elu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu::name, elu::overload_name)
      .typed<elu::schema>();
}

// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
at::Tensor elu::call(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_elu_typed_handle();
    return op.call(self, alpha, scale, input_scale);
}

// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
at::Tensor elu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_elu_typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, scale, input_scale);
}

// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<elu_backward_grad_input::schema> create_elu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_backward_grad_input::name, elu_backward_grad_input::overload_name)
      .typed<elu_backward_grad_input::schema>();
}

// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & elu_backward_grad_input::call(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_elu_backward_grad_input_typed_handle();
    return op.call(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}

// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & elu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_elu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
}

// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<elu_backward::schema> create_elu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_backward::name, elu_backward::overload_name)
      .typed<elu_backward::schema>();
}

// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
at::Tensor elu_backward::call(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_elu_backward_typed_handle();
    return op.call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
}

// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
at::Tensor elu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_elu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result);
}

// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<elu_::schema> create_elu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(elu_::name, elu_::overload_name)
      .typed<elu_::schema>();
}

// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
at::Tensor & elu_::call(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_elu__typed_handle();
    return op.call(self, alpha, scale, input_scale);
}

// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
at::Tensor & elu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_elu__typed_handle();
    return op.redispatch(dispatchKeySet, self, alpha, scale, input_scale);
}

// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<glu_out::schema> create_glu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_out::name, glu_out::overload_name)
      .typed<glu_out::schema>();
}

// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_glu_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_glu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::glu(Tensor self, int dim=-1) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<glu::schema> create_glu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu::name, glu::overload_name)
      .typed<glu::schema>();
}

// aten::glu(Tensor self, int dim=-1) -> Tensor
at::Tensor glu::call(const at::Tensor & self, int64_t dim) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_glu_typed_handle();
    return op.call(self, dim);
}

// aten::glu(Tensor self, int dim=-1) -> Tensor
at::Tensor glu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_glu_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim);
}

// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward_grad_input::schema> create_glu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward_grad_input::name, glu_backward_grad_input::overload_name)
      .typed<glu_backward_grad_input::schema>();
}

// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & glu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_glu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, dim, grad_input);
}

// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & glu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_glu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, dim, grad_input);
}

// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward::schema> create_glu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward::name, glu_backward::overload_name)
      .typed<glu_backward::schema>();
}

// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
at::Tensor glu_backward::call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_glu_backward_typed_handle();
    return op.call(grad_output, self, dim);
}

// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
at::Tensor glu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_glu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, dim);
}

// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<glu_jvp::schema> create_glu_jvp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_jvp::name, glu_jvp::overload_name)
      .typed<glu_jvp::schema>();
}

// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
at::Tensor glu_jvp::call(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_glu_jvp_typed_handle();
    return op.call(glu, x, dx, dim);
}

// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
at::Tensor glu_jvp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_glu_jvp_typed_handle();
    return op.redispatch(dispatchKeySet, glu, x, dx, dim);
}

// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward_jvp::schema> create_glu_backward_jvp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward_jvp::name, glu_backward_jvp::overload_name)
      .typed<glu_backward_jvp::schema>();
}

// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
at::Tensor glu_backward_jvp::call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_glu_backward_jvp_typed_handle();
    return op.call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
}

// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
at::Tensor glu_backward_jvp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_glu_backward_jvp_typed_handle();
    return op.redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim);
}

// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_out::schema> create_hardsigmoid_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid_out::name, hardsigmoid_out::overload_name)
      .typed<hardsigmoid_out::schema>();
}

// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardsigmoid_out::call(const at::Tensor & self, at::Tensor & out) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_hardsigmoid_out_typed_handle();
    return op.call(self, out);
}

// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardsigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_hardsigmoid_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::hardsigmoid(Tensor self) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid::schema> create_hardsigmoid_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid::name, hardsigmoid::overload_name)
      .typed<hardsigmoid::schema>();
}

// aten::hardsigmoid(Tensor self) -> Tensor
at::Tensor hardsigmoid::call(const at::Tensor & self) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_hardsigmoid_typed_handle();
    return op.call(self);
}

// aten::hardsigmoid(Tensor self) -> Tensor
at::Tensor hardsigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_hardsigmoid_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_::schema> create_hardsigmoid__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid_::name, hardsigmoid_::overload_name)
      .typed<hardsigmoid_::schema>();
}

// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & hardsigmoid_::call(at::Tensor & self) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_hardsigmoid__typed_handle();
    return op.call(self);
}

// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
at::Tensor & hardsigmoid_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_hardsigmoid__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_backward_grad_input::schema> create_hardsigmoid_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid_backward_grad_input::name, hardsigmoid_backward_grad_input::overload_name)
      .typed<hardsigmoid_backward_grad_input::schema>();
}

// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardsigmoid_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_hardsigmoid_backward_grad_input_typed_handle();
    return op.call(grad_output, self, grad_input);
}

// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & hardsigmoid_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_hardsigmoid_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, grad_input);
}

// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
// Resolves the dispatcher record for this op into a schema-typed handle; C10_NOINLINE keeps the lookup out of call sites.
static C10_NOINLINE c10::TypedOperatorHandle<hardsigmoid_backward::schema> create_hardsigmoid_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardsigmoid_backward::name, hardsigmoid_backward::overload_name)
      .typed<hardsigmoid_backward::schema>();
}

// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor hardsigmoid_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    // Handle is resolved once and cached in a function-local static.
    static auto op = create_hardsigmoid_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor hardsigmoid_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    // Dispatches through the cached handle with the caller-provided DispatchKeySet.
    static auto op = create_hardsigmoid_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_out::schema> create_hardtanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_out::name, hardtanh_out::overload_name)
      .typed<hardtanh_out::schema>();
}

// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & hardtanh_out::call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
    
    static auto op = create_hardtanh_out_typed_handle();
    return op.call(self, min_val, max_val, out);
}

// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & hardtanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
    
    static auto op = create_hardtanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, min_val, max_val, out);
}

// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh::schema> create_hardtanh_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh::name, hardtanh::overload_name)
      .typed<hardtanh::schema>();
}

// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor hardtanh::call(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    
    static auto op = create_hardtanh_typed_handle();
    return op.call(self, min_val, max_val);
}

// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor hardtanh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    
    static auto op = create_hardtanh_typed_handle();
    return op.redispatch(dispatchKeySet, self, min_val, max_val);
}

// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_backward_grad_input::schema> create_hardtanh_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_backward_grad_input::name, hardtanh_backward_grad_input::overload_name)
      .typed<hardtanh_backward_grad_input::schema>();
}

// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & hardtanh_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
    
    static auto op = create_hardtanh_backward_grad_input_typed_handle();
    return op.call(grad_output, self, min_val, max_val, grad_input);
}

// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & hardtanh_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
    
    static auto op = create_hardtanh_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, min_val, max_val, grad_input);
}

// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_backward::schema> create_hardtanh_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_backward::name, hardtanh_backward::overload_name)
      .typed<hardtanh_backward::schema>();
}

// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor hardtanh_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    
    static auto op = create_hardtanh_backward_typed_handle();
    return op.call(grad_output, self, min_val, max_val);
}

// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor hardtanh_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    
    static auto op = create_hardtanh_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, min_val, max_val);
}

// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardtanh_::schema> create_hardtanh__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardtanh_::name, hardtanh_::overload_name)
      .typed<hardtanh_::schema>();
}

// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
// Entry point (in-place variant); the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & hardtanh_::call(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    
    static auto op = create_hardtanh__typed_handle();
    return op.call(self, min_val, max_val);
}

// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & hardtanh_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
    
    static auto op = create_hardtanh__typed_handle();
    return op.redispatch(dispatchKeySet, self, min_val, max_val);
}

// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_out::schema> create_hardswish_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish_out::name, hardswish_out::overload_name)
      .typed<hardswish_out::schema>();
}

// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & hardswish_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_hardswish_out_typed_handle();
    return op.call(self, out);
}

// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & hardswish_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_hardswish_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::hardswish(Tensor self) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardswish::schema> create_hardswish_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish::name, hardswish::overload_name)
      .typed<hardswish::schema>();
}

// aten::hardswish(Tensor self) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor hardswish::call(const at::Tensor & self) {
    
    static auto op = create_hardswish_typed_handle();
    return op.call(self);
}

// aten::hardswish(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor hardswish::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_hardswish_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_::schema> create_hardswish__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish_::name, hardswish_::overload_name)
      .typed<hardswish_::schema>();
}

// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
// Entry point (in-place variant); the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & hardswish_::call(at::Tensor & self) {
    
    static auto op = create_hardswish__typed_handle();
    return op.call(self);
}

// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & hardswish_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
    
    static auto op = create_hardswish__typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_backward::schema> create_hardswish_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish_backward::name, hardswish_backward::overload_name)
      .typed<hardswish_backward::schema>();
}

// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor hardswish_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_hardswish_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor hardswish_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_hardswish_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_out::schema> create_leaky_relu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_out::name, leaky_relu_out::overload_name)
      .typed<leaky_relu_out::schema>();
}

// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & leaky_relu_out::call(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
    
    static auto op = create_leaky_relu_out_typed_handle();
    return op.call(self, negative_slope, out);
}

// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & leaky_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
    
    static auto op = create_leaky_relu_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, negative_slope, out);
}

// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu::schema> create_leaky_relu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu::name, leaky_relu::overload_name)
      .typed<leaky_relu::schema>();
}

// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor leaky_relu::call(const at::Tensor & self, const at::Scalar & negative_slope) {
    
    static auto op = create_leaky_relu_typed_handle();
    return op.call(self, negative_slope);
}

// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor leaky_relu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope) {
    
    static auto op = create_leaky_relu_typed_handle();
    return op.redispatch(dispatchKeySet, self, negative_slope);
}

// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_backward_grad_input::schema> create_leaky_relu_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_backward_grad_input::name, leaky_relu_backward_grad_input::overload_name)
      .typed<leaky_relu_backward_grad_input::schema>();
}

// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & leaky_relu_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
    
    static auto op = create_leaky_relu_backward_grad_input_typed_handle();
    return op.call(grad_output, self, negative_slope, self_is_result, grad_input);
}

// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & leaky_relu_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
    
    static auto op = create_leaky_relu_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result, grad_input);
}

// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_backward::schema> create_leaky_relu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_backward::name, leaky_relu_backward::overload_name)
      .typed<leaky_relu_backward::schema>();
}

// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor leaky_relu_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
    
    static auto op = create_leaky_relu_backward_typed_handle();
    return op.call(grad_output, self, negative_slope, self_is_result);
}

// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor leaky_relu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
    
    static auto op = create_leaky_relu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result);
}

// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<leaky_relu_::schema> create_leaky_relu__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(leaky_relu_::name, leaky_relu_::overload_name)
      .typed<leaky_relu_::schema>();
}

// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
// Entry point (in-place variant); the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & leaky_relu_::call(at::Tensor & self, const at::Scalar & negative_slope) {
    
    static auto op = create_leaky_relu__typed_handle();
    return op.call(self, negative_slope);
}

// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & leaky_relu_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope) {
    
    static auto op = create_leaky_relu__typed_handle();
    return op.redispatch(dispatchKeySet, self, negative_slope);
}

// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_out::schema> create_log_sigmoid_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_out::name, log_sigmoid_out::overload_name)
      .typed<log_sigmoid_out::schema>();
}

// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & log_sigmoid_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_sigmoid_out_typed_handle();
    return op.call(self, out);
}

// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & log_sigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_log_sigmoid_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::log_sigmoid(Tensor self) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid::schema> create_log_sigmoid_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid::name, log_sigmoid::overload_name)
      .typed<log_sigmoid::schema>();
}

// aten::log_sigmoid(Tensor self) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor log_sigmoid::call(const at::Tensor & self) {
    
    static auto op = create_log_sigmoid_typed_handle();
    return op.call(self);
}

// aten::log_sigmoid(Tensor self) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor log_sigmoid::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log_sigmoid_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_forward_output::schema> create_log_sigmoid_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_forward_output::name, log_sigmoid_forward_output::overload_name)
      .typed<log_sigmoid_forward_output::schema>();
}

// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_output::call(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
    
    static auto op = create_log_sigmoid_forward_output_typed_handle();
    return op.call(self, output, buffer);
}

// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
    
    static auto op = create_log_sigmoid_forward_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, output, buffer);
}

// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_forward::schema> create_log_sigmoid_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_forward::name, log_sigmoid_forward::overload_name)
      .typed<log_sigmoid_forward::schema>();
}

// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward::call(const at::Tensor & self) {
    
    static auto op = create_log_sigmoid_forward_typed_handle();
    return op.call(self);
}

// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_log_sigmoid_forward_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_backward_grad_input::schema> create_log_sigmoid_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_backward_grad_input::name, log_sigmoid_backward_grad_input::overload_name)
      .typed<log_sigmoid_backward_grad_input::schema>();
}

// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & log_sigmoid_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
    
    static auto op = create_log_sigmoid_backward_grad_input_typed_handle();
    return op.call(grad_output, self, buffer, grad_input);
}

// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & log_sigmoid_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
    
    static auto op = create_log_sigmoid_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, buffer, grad_input);
}

// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_sigmoid_backward::schema> create_log_sigmoid_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_sigmoid_backward::name, log_sigmoid_backward::overload_name)
      .typed<log_sigmoid_backward::schema>();
}

// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor log_sigmoid_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
    
    static auto op = create_log_sigmoid_backward_typed_handle();
    return op.call(grad_output, self, buffer);
}

// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor log_sigmoid_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
    
    static auto op = create_log_sigmoid_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, buffer);
}

// aten::rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_out::schema> create_rrelu_with_noise_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_out::name, rrelu_with_noise_out::overload_name)
      .typed<rrelu_with_noise_out::schema>();
}

// aten::rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & rrelu_with_noise_out::call(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_rrelu_with_noise_out_typed_handle();
    return op.call(self, noise, lower, upper, training, generator, out);
}

// aten::rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & rrelu_with_noise_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_rrelu_with_noise_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator, out);
}

// aten::rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise::schema> create_rrelu_with_noise_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise::name, rrelu_with_noise::overload_name)
      .typed<rrelu_with_noise::schema>();
}

// aten::rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor rrelu_with_noise::call(const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu_with_noise_typed_handle();
    return op.call(self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor rrelu_with_noise::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu_with_noise_typed_handle();
    return op.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_backward::schema> create_rrelu_with_noise_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_backward::name, rrelu_with_noise_backward::overload_name)
      .typed<rrelu_with_noise_backward::schema>();
}

// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor rrelu_with_noise_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
    
    static auto op = create_rrelu_with_noise_backward_typed_handle();
    return op.call(grad_output, self, noise, lower, upper, training, self_is_result);
}

// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor rrelu_with_noise_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
    
    static auto op = create_rrelu_with_noise_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result);
}

// aten::rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_::schema> create_rrelu_with_noise__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_::name, rrelu_with_noise_::overload_name)
      .typed<rrelu_with_noise_::schema>();
}

// aten::rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
// Entry point (in-place variant); the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & rrelu_with_noise_::call(at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu_with_noise__typed_handle();
    return op.call(self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & rrelu_with_noise_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    static auto op = create_rrelu_with_noise__typed_handle();
    return op.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator);
}

// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<softplus_out::schema> create_softplus_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus_out::name, softplus_out::overload_name)
      .typed<softplus_out::schema>();
}

// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & softplus_out::call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
    
    static auto op = create_softplus_out_typed_handle();
    return op.call(self, beta, threshold, out);
}

// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & softplus_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
    
    static auto op = create_softplus_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, beta, threshold, out);
}

// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<softplus::schema> create_softplus_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus::name, softplus::overload_name)
      .typed<softplus::schema>();
}

// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor softplus::call(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
    
    static auto op = create_softplus_typed_handle();
    return op.call(self, beta, threshold);
}

// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor softplus::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
    
    static auto op = create_softplus_typed_handle();
    return op.redispatch(dispatchKeySet, self, beta, threshold);
}

// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<softplus_backward_grad_input::schema> create_softplus_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus_backward_grad_input::name, softplus_backward_grad_input::overload_name)
      .typed<softplus_backward_grad_input::schema>();
}

// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor & softplus_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
    
    static auto op = create_softplus_backward_grad_input_typed_handle();
    return op.call(grad_output, self, beta, threshold, grad_input);
}

// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor & softplus_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
    
    static auto op = create_softplus_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, beta, threshold, grad_input);
}

// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
// One-time dispatcher lookup: resolves {name, overload_name} to a typed operator handle.
static C10_NOINLINE c10::TypedOperatorHandle<softplus_backward::schema> create_softplus_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softplus_backward::name, softplus_backward::overload_name)
      .typed<softplus_backward::schema>();
}

// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
// Entry point; the handle is cached in a function-local static (thread-safe init since C++11).
at::Tensor softplus_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
    
    static auto op = create_softplus_backward_typed_handle();
    return op.call(grad_output, self, beta, threshold);
}

// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
// Like call(), but forwards an explicit DispatchKeySet verbatim to the dispatcher.
at::Tensor softplus_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
    
    static auto op = create_softplus_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, beta, threshold);
}

// Dispatcher glue for aten::softshrink.out (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<softshrink_out::schema> create_softshrink_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink_out::name, softshrink_out::overload_name)
      .typed<softshrink_out::schema>();
}

// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softshrink_out::call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
    
    static auto op = create_softshrink_out_typed_handle();
    return op.call(self, lambd, out);
}

// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & softshrink_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
    
    static auto op = create_softshrink_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, out);
}

// Dispatcher glue for aten::softshrink (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softshrink::schema> create_softshrink_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink::name, softshrink::overload_name)
      .typed<softshrink::schema>();
}

// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
at::Tensor softshrink::call(const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_softshrink_typed_handle();
    return op.call(self, lambd);
}

// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
at::Tensor softshrink::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_softshrink_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd);
}

// Dispatcher glue for aten::softshrink_backward.grad_input (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<softshrink_backward_grad_input::schema> create_softshrink_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink_backward_grad_input::name, softshrink_backward_grad_input::overload_name)
      .typed<softshrink_backward_grad_input::schema>();
}

// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & softshrink_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
    
    static auto op = create_softshrink_backward_grad_input_typed_handle();
    return op.call(grad_output, self, lambd, grad_input);
}

// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & softshrink_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
    
    static auto op = create_softshrink_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, lambd, grad_input);
}

// Dispatcher glue for aten::softshrink_backward (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<softshrink_backward::schema> create_softshrink_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(softshrink_backward::name, softshrink_backward::overload_name)
      .typed<softshrink_backward::schema>();
}

// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
at::Tensor softshrink_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_softshrink_backward_typed_handle();
    return op.call(grad_output, self, lambd);
}

// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
at::Tensor softshrink_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
    
    static auto op = create_softshrink_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, lambd);
}

// Dispatcher glue for aten::adaptive_avg_pool2d.out (@generated by torchgen — do not hand-edit).
// output_size is SymInt-typed (c10::SymIntArrayRef) to support symbolic shapes.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool2d_out::schema> create_adaptive_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool2d_out::name, adaptive_avg_pool2d_out::overload_name)
      .typed<adaptive_avg_pool2d_out::schema>();
}

// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool2d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

// Dispatcher glue for aten::adaptive_avg_pool2d (@generated by torchgen — do not hand-edit).
// output_size is SymInt-typed (c10::SymIntArrayRef) to support symbolic shapes.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool2d::schema> create_adaptive_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool2d::name, adaptive_avg_pool2d::overload_name)
      .typed<adaptive_avg_pool2d::schema>();
}

// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor adaptive_avg_pool2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor adaptive_avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::mkldnn_adaptive_avg_pool2d (@generated by torchgen — do not hand-edit).
// Unlike the plain adaptive_avg_pool2d above, output_size here is a concrete at::IntArrayRef (int[2]).
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d::schema> create_mkldnn_adaptive_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d::name, mkldnn_adaptive_avg_pool2d::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_mkldnn_adaptive_avg_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_mkldnn_adaptive_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::mkldnn_adaptive_avg_pool2d.out (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d_out::schema> create_mkldnn_adaptive_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d_out::name, mkldnn_adaptive_avg_pool2d_out::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d_out::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_mkldnn_adaptive_avg_pool2d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_mkldnn_adaptive_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

// Dispatcher glue for aten::mkldnn_adaptive_avg_pool2d_backward (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d_backward::schema> create_mkldnn_adaptive_avg_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d_backward::name, mkldnn_adaptive_avg_pool2d_backward::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d_backward::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_mkldnn_adaptive_avg_pool2d_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor mkldnn_adaptive_avg_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create_mkldnn_adaptive_avg_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// Dispatcher glue for aten::_adaptive_avg_pool2d (@generated by torchgen — do not hand-edit).
// output_size is SymInt-typed (c10::SymIntArrayRef) to support symbolic shapes.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d::schema> create__adaptive_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool2d::name, _adaptive_avg_pool2d::overload_name)
      .typed<_adaptive_avg_pool2d::schema>();
}

// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor _adaptive_avg_pool2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create__adaptive_avg_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
at::Tensor _adaptive_avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create__adaptive_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::_adaptive_avg_pool2d_backward (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d_backward::schema> create__adaptive_avg_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool2d_backward::name, _adaptive_avg_pool2d_backward::overload_name)
      .typed<_adaptive_avg_pool2d_backward::schema>();
}

// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create__adaptive_avg_pool2d_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create__adaptive_avg_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// Dispatcher glue for aten::adaptive_avg_pool3d.out (@generated by torchgen — do not hand-edit).
// output_size is SymInt-typed (c10::SymIntArrayRef) to support symbolic shapes.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool3d_out::schema> create_adaptive_avg_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool3d_out::name, adaptive_avg_pool3d_out::overload_name)
      .typed<adaptive_avg_pool3d_out::schema>();
}

// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool3d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

// Dispatcher glue for aten::adaptive_avg_pool3d (@generated by torchgen — do not hand-edit).
// output_size is SymInt-typed (c10::SymIntArrayRef) to support symbolic shapes.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool3d::schema> create_adaptive_avg_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool3d::name, adaptive_avg_pool3d::overload_name)
      .typed<adaptive_avg_pool3d::schema>();
}

// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor adaptive_avg_pool3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool3d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor adaptive_avg_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create_adaptive_avg_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::_adaptive_avg_pool3d (@generated by torchgen — do not hand-edit).
// output_size is SymInt-typed (c10::SymIntArrayRef) to support symbolic shapes.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d::schema> create__adaptive_avg_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool3d::name, _adaptive_avg_pool3d::overload_name)
      .typed<_adaptive_avg_pool3d::schema>();
}

// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor _adaptive_avg_pool3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create__adaptive_avg_pool3d_typed_handle();
    return op.call(self, output_size);
}

// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
at::Tensor _adaptive_avg_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) {
    
    static auto op = create__adaptive_avg_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::adaptive_avg_pool3d_backward.grad_input (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool3d_backward_grad_input::schema> create_adaptive_avg_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool3d_backward_grad_input::name, adaptive_avg_pool3d_backward_grad_input::overload_name)
      .typed<adaptive_avg_pool3d_backward_grad_input::schema>();
}

// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    
    static auto op = create_adaptive_avg_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, grad_input);
}

// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_avg_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
    
    static auto op = create_adaptive_avg_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, grad_input);
}

// Dispatcher glue for aten::_adaptive_avg_pool3d_backward (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d_backward::schema> create__adaptive_avg_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool3d_backward::name, _adaptive_avg_pool3d_backward::overload_name)
      .typed<_adaptive_avg_pool3d_backward::schema>();
}

// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create__adaptive_avg_pool3d_backward_typed_handle();
    return op.call(grad_output, self);
}

// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
at::Tensor _adaptive_avg_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
    
    static auto op = create__adaptive_avg_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self);
}

// Dispatcher glue for aten::adaptive_max_pool2d.out (@generated by torchgen — do not hand-edit).
// Returns both mutated outputs (out, indices) as a tuple of references, per the (a!)/(b!) schema.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d_out::schema> create_adaptive_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d_out::name, adaptive_max_pool2d_out::overload_name)
      .typed<adaptive_max_pool2d_out::schema>();
}

// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool2d_out_typed_handle();
    return op.call(self, output_size, out, indices);
}

// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out, indices);
}

// Dispatcher glue for aten::adaptive_max_pool2d (@generated by torchgen — do not hand-edit).
// Returns a (values, indices) tuple per the schema. The typed handle is cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d::schema> create_adaptive_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d::name, adaptive_max_pool2d::overload_name)
      .typed<adaptive_max_pool2d::schema>();
}

// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool2d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::adaptive_max_pool2d_backward.grad_input (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d_backward_grad_input::schema> create_adaptive_max_pool2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d_backward_grad_input::name, adaptive_max_pool2d_backward_grad_input::overload_name)
      .typed<adaptive_max_pool2d_backward_grad_input::schema>();
}

// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_adaptive_max_pool2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, indices, grad_input);
}

// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_adaptive_max_pool2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices, grad_input);
}

// Dispatcher glue for aten::adaptive_max_pool2d_backward (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool2d_backward::schema> create_adaptive_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool2d_backward::name, adaptive_max_pool2d_backward::overload_name)
      .typed<adaptive_max_pool2d_backward::schema>();
}

// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool2d_backward_typed_handle();
    return op.call(grad_output, self, indices);
}

// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices);
}

// Dispatcher glue for aten::adaptive_max_pool3d.out (@generated by torchgen — do not hand-edit).
// Returns both mutated outputs (out, indices) as a tuple of references, per the (a!)/(b!) schema.
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d_out::schema> create_adaptive_max_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d_out::name, adaptive_max_pool3d_out::overload_name)
      .typed<adaptive_max_pool3d_out::schema>();
}

// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool3d_out_typed_handle();
    return op.call(self, output_size, out, indices);
}

// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out, indices);
}

// Dispatcher glue for aten::adaptive_max_pool3d (@generated by torchgen — do not hand-edit).
// Returns a (values, indices) tuple per the schema. The typed handle is cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d::schema> create_adaptive_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d::name, adaptive_max_pool3d::overload_name)
      .typed<adaptive_max_pool3d::schema>();
}

// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d::call(const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool3d_typed_handle();
    return op.call(self, output_size);
}

// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) {
    
    static auto op = create_adaptive_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size);
}

// Dispatcher glue for aten::adaptive_max_pool3d_backward.grad_input (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d_backward_grad_input::schema> create_adaptive_max_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d_backward_grad_input::name, adaptive_max_pool3d_backward_grad_input::overload_name)
      .typed<adaptive_max_pool3d_backward_grad_input::schema>();
}

// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_adaptive_max_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, indices, grad_input);
}

// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & adaptive_max_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_adaptive_max_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices, grad_input);
}

// Dispatcher glue for aten::adaptive_max_pool3d_backward (@generated by torchgen — do not hand-edit).
// The typed handle is resolved once via the cold C10_NOINLINE factory and cached in a
// function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_max_pool3d_backward::schema> create_adaptive_max_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_max_pool3d_backward::name, adaptive_max_pool3d_backward::overload_name)
      .typed<adaptive_max_pool3d_backward::schema>();
}

// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool3d_backward_typed_handle();
    return op.call(grad_output, self, indices);
}

// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
at::Tensor adaptive_max_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
    
    static auto op = create_adaptive_max_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, indices);
}

// Dispatcher glue for aten::avg_pool2d.out (@generated by torchgen — do not hand-edit).
// divisor_override is ::std::optional<int64_t> (schema: int? = None).
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d_out::schema> create_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d_out::name, avg_pool2d_out::overload_name)
      .typed<avg_pool2d_out::schema>();
}

// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & avg_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
    
    static auto op = create_avg_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
    
    static auto op = create_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// Dispatcher glue for aten::avg_pool2d (@generated by torchgen — do not hand-edit).
// divisor_override is ::std::optional<int64_t> (schema: int? = None).
// The typed handle is cached in a function-local static; redispatch() forwards the caller's DispatchKeySet.
// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d::schema> create_avg_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d::name, avg_pool2d::overload_name)
      .typed<avg_pool2d::schema>();
}

// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
at::Tensor avg_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool2d_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
at::Tensor avg_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d_backward_grad_input::schema> create_avg_pool2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d_backward_grad_input::name, avg_pool2d_backward_grad_input::overload_name)
      .typed<avg_pool2d_backward_grad_input::schema>();
}

// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
at::Tensor & avg_pool2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
    
    static auto op = create_avg_pool2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor & avg_pool2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
    
    static auto op = create_avg_pool2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool2d_backward::schema> create_avg_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool2d_backward::name, avg_pool2d_backward::overload_name)
      .typed<avg_pool2d_backward::schema>();
}

// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
// Dispatch entry point: begins dispatch for aten::avg_pool2d_backward.
at::Tensor avg_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool2d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor avg_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d_out::schema> create_avg_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d_out::name, avg_pool3d_out::overload_name)
      .typed<avg_pool3d_out::schema>();
}

// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
at::Tensor & avg_pool3d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
    
    static auto op = create_avg_pool3d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor & avg_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
    
    static auto op = create_avg_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
}

// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d::schema> create_avg_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d::name, avg_pool3d::overload_name)
      .typed<avg_pool3d::schema>();
}

// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
// Dispatch entry point: begins dispatch for aten::avg_pool3d.
at::Tensor avg_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool3d_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor avg_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d_backward_grad_input::schema> create_avg_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d_backward_grad_input::name, avg_pool3d_backward_grad_input::overload_name)
      .typed<avg_pool3d_backward_grad_input::schema>();
}

// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
at::Tensor & avg_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
    
    static auto op = create_avg_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor & avg_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
    
    static auto op = create_avg_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
}

// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool3d_backward::schema> create_avg_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool3d_backward::name, avg_pool3d_backward::overload_name)
      .typed<avg_pool3d_backward::schema>();
}

// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
// Dispatch entry point: begins dispatch for aten::avg_pool3d_backward.
at::Tensor avg_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool3d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor avg_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
    
    static auto op = create_avg_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
}

// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d_output::schema> create_fractional_max_pool2d_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d_output::name, fractional_max_pool2d_output::overload_name)
      .typed<fractional_max_pool2d_output::schema>();
}

// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_output::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool2d_output_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples, output, indices);
}

// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Continues dispatch using the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool2d_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices);
}

// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d::schema> create_fractional_max_pool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d::name, fractional_max_pool2d::overload_name)
      .typed<fractional_max_pool2d::schema>();
}

// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
// Dispatch entry point: begins dispatch for aten::fractional_max_pool2d.
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    
    static auto op = create_fractional_max_pool2d_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples);
}

// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
// Continues dispatch using the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    
    static auto op = create_fractional_max_pool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples);
}

// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d_backward_grad_input::schema> create_fractional_max_pool2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d_backward_grad_input::name, fractional_max_pool2d_backward_grad_input::overload_name)
      .typed<fractional_max_pool2d_backward_grad_input::schema>();
}

// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
at::Tensor & fractional_max_pool2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_fractional_max_pool2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor & fractional_max_pool2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_fractional_max_pool2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool2d_backward::schema> create_fractional_max_pool2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool2d_backward::name, fractional_max_pool2d_backward::overload_name)
      .typed<fractional_max_pool2d_backward::schema>();
}

// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
// Dispatch entry point: begins dispatch for aten::fractional_max_pool2d_backward.
at::Tensor fractional_max_pool2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool2d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, output_size, indices);
}

// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor fractional_max_pool2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices);
}

// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d_output::schema> create_fractional_max_pool3d_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d_output::name, fractional_max_pool3d_output::overload_name)
      .typed<fractional_max_pool3d_output::schema>();
}

// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_output::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool3d_output_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples, output, indices);
}

// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Continues dispatch using the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool3d_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices);
}

// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d::schema> create_fractional_max_pool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d::name, fractional_max_pool3d::overload_name)
      .typed<fractional_max_pool3d::schema>();
}

// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
// Dispatch entry point: begins dispatch for aten::fractional_max_pool3d.
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    
    static auto op = create_fractional_max_pool3d_typed_handle();
    return op.call(self, kernel_size, output_size, random_samples);
}

// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
// Continues dispatch using the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
    
    static auto op = create_fractional_max_pool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples);
}

// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d_backward_grad_input::schema> create_fractional_max_pool3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d_backward_grad_input::name, fractional_max_pool3d_backward_grad_input::overload_name)
      .typed<fractional_max_pool3d_backward_grad_input::schema>();
}

// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
at::Tensor & fractional_max_pool3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_fractional_max_pool3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor & fractional_max_pool3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_fractional_max_pool3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input);
}

// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<fractional_max_pool3d_backward::schema> create_fractional_max_pool3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fractional_max_pool3d_backward::name, fractional_max_pool3d_backward::overload_name)
      .typed<fractional_max_pool3d_backward::schema>();
}

// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
// Dispatch entry point: begins dispatch for aten::fractional_max_pool3d_backward.
at::Tensor fractional_max_pool3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool3d_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, output_size, indices);
}

// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor fractional_max_pool3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
    
    static auto op = create_fractional_max_pool3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices);
}

// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices_out::schema> create_max_pool2d_with_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices_out::name, max_pool2d_with_indices_out::overload_name)
      .typed<max_pool2d_with_indices_out::schema>();
}

// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_max_pool2d_with_indices_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Continues dispatch using the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_max_pool2d_with_indices_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices::schema> create_max_pool2d_with_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices::name, max_pool2d_with_indices::overload_name)
      .typed<max_pool2d_with_indices::schema>();
}

// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Dispatch entry point: begins dispatch for aten::max_pool2d_with_indices.
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_max_pool2d_with_indices_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Continues dispatch using the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_max_pool2d_with_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Builds the typed operator handle for this schema from the global dispatcher
// singleton; throws if the schema is not registered. C10_NOINLINE keeps this
// cold one-time path out of the hot call sites, which cache the result.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices_backward_grad_input::schema> create_max_pool2d_with_indices_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices_backward_grad_input::name, max_pool2d_with_indices_backward_grad_input::overload_name)
      .typed<max_pool2d_with_indices_backward_grad_input::schema>();
}

// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: resolves the handle once (function-local static cache)
// and begins dispatch from the top of the dispatch stack.
at::Tensor & max_pool2d_with_indices_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_max_pool2d_with_indices_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor & max_pool2d_with_indices_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_max_pool2d_with_indices_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
// Handle factory for the functional (non-out) overload; see note above.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_with_indices_backward::schema> create_max_pool2d_with_indices_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_with_indices_backward::name, max_pool2d_with_indices_backward::overload_name)
      .typed<max_pool2d_with_indices_backward::schema>();
}

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
// Dispatch entry point: begins dispatch for aten::max_pool2d_with_indices_backward.
at::Tensor max_pool2d_with_indices_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    
    static auto op = create_max_pool2d_with_indices_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
// Continues dispatch using the caller-supplied DispatchKeySet.
at::Tensor max_pool2d_with_indices_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    
    static auto op = create_max_pool2d_with_indices_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices_out::schema> create_max_pool3d_with_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices_out::name, max_pool3d_with_indices_out::overload_name)
      .typed<max_pool3d_with_indices_out::schema>();
}

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Public entry point: dispatches via a handle cached in a function-local static,
// writing results into the caller-provided out/indices tensors.
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_max_pool3d_with_indices_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
    
    static auto op = create_max_pool3d_with_indices_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices::schema> create_max_pool3d_with_indices_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices::name, max_pool3d_with_indices::overload_name)
      .typed<max_pool3d_with_indices::schema>();
}

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Public entry point: dispatches via a handle cached in a function-local static.
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_max_pool3d_with_indices_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
    
    static auto op = create_max_pool3d_with_indices_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices_backward_grad_input::schema> create_max_pool3d_with_indices_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices_backward_grad_input::name, max_pool3d_with_indices_backward_grad_input::overload_name)
      .typed<max_pool3d_with_indices_backward_grad_input::schema>();
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided grad_input tensor.
at::Tensor & max_pool3d_with_indices_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_max_pool3d_with_indices_backward_grad_input_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & max_pool3d_with_indices_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    
    static auto op = create_max_pool3d_with_indices_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_pool3d_with_indices_backward::schema> create_max_pool3d_with_indices_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool3d_with_indices_backward::name, max_pool3d_with_indices_backward::overload_name)
      .typed<max_pool3d_with_indices_backward::schema>();
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor max_pool3d_with_indices_backward::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    
    static auto op = create_max_pool3d_with_indices_backward_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor max_pool3d_with_indices_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    
    static auto op = create_max_pool3d_with_indices_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool2d_out::schema> create_max_unpool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool2d_out::name, max_unpool2d_out::overload_name)
      .typed<max_unpool2d_out::schema>();
}

// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided out tensor.
at::Tensor & max_unpool2d_out::call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_max_unpool2d_out_typed_handle();
    return op.call(self, indices, output_size, out);
}

// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & max_unpool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_max_unpool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size, out);
}

// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool2d::schema> create_max_unpool2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool2d::name, max_unpool2d::overload_name)
      .typed<max_unpool2d::schema>();
}

// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor max_unpool2d::call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
    
    static auto op = create_max_unpool2d_typed_handle();
    return op.call(self, indices, output_size);
}

// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor max_unpool2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
    
    static auto op = create_max_unpool2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size);
}

// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool3d_out::schema> create_max_unpool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool3d_out::name, max_unpool3d_out::overload_name)
      .typed<max_unpool3d_out::schema>();
}

// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided out tensor.
at::Tensor & max_unpool3d_out::call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_max_unpool3d_out_typed_handle();
    return op.call(self, indices, output_size, stride, padding, out);
}

// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & max_unpool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_max_unpool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size, stride, padding, out);
}

// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<max_unpool3d::schema> create_max_unpool3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_unpool3d::name, max_unpool3d::overload_name)
      .typed<max_unpool3d::schema>();
}

// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor max_unpool3d::call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
    
    static auto op = create_max_unpool3d_typed_handle();
    return op.call(self, indices, output_size, stride, padding);
}

// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor max_unpool3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
    
    static auto op = create_max_unpool3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, output_size, stride, padding);
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d_out::schema> create_reflection_pad1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d_out::name, reflection_pad1d_out::overload_name)
      .typed<reflection_pad1d_out::schema>();
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided out tensor.
at::Tensor & reflection_pad1d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_reflection_pad1d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & reflection_pad1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_reflection_pad1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d::schema> create_reflection_pad1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d::name, reflection_pad1d::overload_name)
      .typed<reflection_pad1d::schema>();
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor reflection_pad1d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad1d_typed_handle();
    return op.call(self, padding);
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor reflection_pad1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d_backward_grad_input::schema> create_reflection_pad1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d_backward_grad_input::name, reflection_pad1d_backward_grad_input::overload_name)
      .typed<reflection_pad1d_backward_grad_input::schema>();
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided grad_input tensor.
at::Tensor & reflection_pad1d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad1d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & reflection_pad1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad1d_backward::schema> create_reflection_pad1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad1d_backward::name, reflection_pad1d_backward::overload_name)
      .typed<reflection_pad1d_backward::schema>();
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor reflection_pad1d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad1d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor reflection_pad1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d_out::schema> create_reflection_pad2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d_out::name, reflection_pad2d_out::overload_name)
      .typed<reflection_pad2d_out::schema>();
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided out tensor.
at::Tensor & reflection_pad2d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_reflection_pad2d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & reflection_pad2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_reflection_pad2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d::schema> create_reflection_pad2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d::name, reflection_pad2d::overload_name)
      .typed<reflection_pad2d::schema>();
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor reflection_pad2d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad2d_typed_handle();
    return op.call(self, padding);
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor reflection_pad2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d_backward_grad_input::schema> create_reflection_pad2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d_backward_grad_input::name, reflection_pad2d_backward_grad_input::overload_name)
      .typed<reflection_pad2d_backward_grad_input::schema>();
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided grad_input tensor.
at::Tensor & reflection_pad2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad2d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & reflection_pad2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad2d_backward::schema> create_reflection_pad2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad2d_backward::name, reflection_pad2d_backward::overload_name)
      .typed<reflection_pad2d_backward::schema>();
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor reflection_pad2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad2d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor reflection_pad2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d_out::schema> create_reflection_pad3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d_out::name, reflection_pad3d_out::overload_name)
      .typed<reflection_pad3d_out::schema>();
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided out tensor.
at::Tensor & reflection_pad3d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_reflection_pad3d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & reflection_pad3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_reflection_pad3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d::schema> create_reflection_pad3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d::name, reflection_pad3d::overload_name)
      .typed<reflection_pad3d::schema>();
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor reflection_pad3d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad3d_typed_handle();
    return op.call(self, padding);
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor reflection_pad3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d_backward_grad_input::schema> create_reflection_pad3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d_backward_grad_input::name, reflection_pad3d_backward_grad_input::overload_name)
      .typed<reflection_pad3d_backward_grad_input::schema>();
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided grad_input tensor.
at::Tensor & reflection_pad3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad3d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & reflection_pad3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_reflection_pad3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<reflection_pad3d_backward::schema> create_reflection_pad3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(reflection_pad3d_backward::name, reflection_pad3d_backward::overload_name)
      .typed<reflection_pad3d_backward::schema>();
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor reflection_pad3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad3d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor reflection_pad3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_reflection_pad3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d_out::schema> create_replication_pad1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d_out::name, replication_pad1d_out::overload_name)
      .typed<replication_pad1d_out::schema>();
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided out tensor.
at::Tensor & replication_pad1d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_replication_pad1d_out_typed_handle();
    return op.call(self, padding, out);
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & replication_pad1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    
    static auto op = create_replication_pad1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, out);
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d::schema> create_replication_pad1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d::name, replication_pad1d::overload_name)
      .typed<replication_pad1d::schema>();
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor replication_pad1d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad1d_typed_handle();
    return op.call(self, padding);
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor replication_pad1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding);
}

// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d_backward_grad_input::schema> create_replication_pad1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d_backward_grad_input::name, replication_pad1d_backward_grad_input::overload_name)
      .typed<replication_pad1d_backward_grad_input::schema>();
}

// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Public entry point: dispatches via a handle cached in a function-local static,
// writing the result into the caller-provided grad_input tensor.
at::Tensor & replication_pad1d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_replication_pad1d_backward_grad_input_typed_handle();
    return op.call(grad_output, self, padding, grad_input);
}

// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor & replication_pad1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    
    static auto op = create_replication_pad1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// Lazily resolves the typed dispatcher handle for this operator; throws if the
// schema is not registered with the Dispatcher singleton.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad1d_backward::schema> create_replication_pad1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(replication_pad1d_backward::name, replication_pad1d_backward::overload_name)
      .typed<replication_pad1d_backward::schema>();
}

// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// Public entry point: dispatches via a handle cached in a function-local static.
at::Tensor replication_pad1d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad1d_backward_typed_handle();
    return op.call(grad_output, self, padding);
}

// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
// Re-entry point: forwards the caller-supplied DispatchKeySet to redispatch().
at::Tensor replication_pad1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    
    static auto op = create_replication_pad1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d_out::schema> create_replication_pad2d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad2d_out::name, replication_pad2d_out::overload_name)
      .typed<replication_pad2d_out::schema>();
}

// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad2d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad2d_out_typed_handle();
    return handle.call(self, padding, out);
}

// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto handle = create_replication_pad2d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, padding, out);
}

// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d::schema> create_replication_pad2d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad2d::name, replication_pad2d::overload_name)
      .typed<replication_pad2d::schema>();
}

// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad2d_typed_handle();
    return handle.call(self, padding);
}

// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto handle = create_replication_pad2d_typed_handle();
    return handle.redispatch(dispatchKeySet, self, padding);
}

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d_backward_grad_input::schema> create_replication_pad2d_backward_grad_input_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad2d_backward_grad_input::name, replication_pad2d_backward_grad_input::overload_name)
      .typed<replication_pad2d_backward_grad_input::schema>();
}

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad2d_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, padding, grad_input);
}

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    static auto handle = create_replication_pad2d_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad2d_backward::schema> create_replication_pad2d_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad2d_backward::name, replication_pad2d_backward::overload_name)
      .typed<replication_pad2d_backward::schema>();
}

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad2d_backward_typed_handle();
    return handle.call(grad_output, self, padding);
}

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
at::Tensor replication_pad2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto handle = create_replication_pad2d_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d_out::schema> create_replication_pad3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad3d_out::name, replication_pad3d_out::overload_name)
      .typed<replication_pad3d_out::schema>();
}

// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad3d_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad3d_out_typed_handle();
    return handle.call(self, padding, out);
}

// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & replication_pad3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    static auto handle = create_replication_pad3d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, padding, out);
}

// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d::schema> create_replication_pad3d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad3d::name, replication_pad3d::overload_name)
      .typed<replication_pad3d::schema>();
}

// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d::call(const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad3d_typed_handle();
    return handle.call(self, padding);
}

// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto handle = create_replication_pad3d_typed_handle();
    return handle.redispatch(dispatchKeySet, self, padding);
}

// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d_backward_grad_input::schema> create_replication_pad3d_backward_grad_input_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad3d_backward_grad_input::name, replication_pad3d_backward_grad_input::overload_name)
      .typed<replication_pad3d_backward_grad_input::schema>();
}

// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad3d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad3d_backward_grad_input_typed_handle();
    return handle.call(grad_output, self, padding, grad_input);
}

// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & replication_pad3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    static auto handle = create_replication_pad3d_backward_grad_input_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
}

// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<replication_pad3d_backward::schema> create_replication_pad3d_backward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(replication_pad3d_backward::name, replication_pad3d_backward::overload_name)
      .typed<replication_pad3d_backward::schema>();
}

// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d_backward::call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_replication_pad3d_backward_typed_handle();
    return handle.call(grad_output, self, padding);
}

// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
at::Tensor replication_pad3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    static auto handle = create_replication_pad3d_backward_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, self, padding);
}

// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_pad_circular::schema> create__pad_circular_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_pad_circular::name, _pad_circular::overload_name)
      .typed<_pad_circular::schema>();
}

// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
at::Tensor _pad_circular::call(const at::Tensor & self, c10::SymIntArrayRef pad) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__pad_circular_typed_handle();
    return handle.call(self, pad);
}

// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
at::Tensor _pad_circular::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad) {
    static auto handle = create__pad_circular_typed_handle();
    return handle.redispatch(dispatchKeySet, self, pad);
}

// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_pad_enum::schema> create__pad_enum_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_pad_enum::name, _pad_enum::overload_name)
      .typed<_pad_enum::schema>();
}

// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
at::Tensor _pad_enum::call(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__pad_enum_typed_handle();
    return handle.call(self, pad, mode, value);
}

// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
at::Tensor _pad_enum::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value) {
    static auto handle = create__pad_enum_typed_handle();
    return handle.redispatch(dispatchKeySet, self, pad, mode, value);
}

// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<pad::schema> create_pad_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(pad::name, pad::overload_name)
      .typed<pad::schema>();
}

// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
at::Tensor pad::call(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, ::std::optional<double> value) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_pad_typed_handle();
    return handle.call(self, pad, mode, value);
}

// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
at::Tensor pad::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, ::std::optional<double> value) {
    static auto handle = create_pad_typed_handle();
    return handle.redispatch(dispatchKeySet, self, pad, mode, value);
}

// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_vec::schema> create_upsample_linear1d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_linear1d_vec::name, upsample_linear1d_vec::overload_name)
      .typed<upsample_linear1d_vec::schema>();
}

// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_linear1d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_linear1d_vec_typed_handle();
    return handle.call(input, output_size, align_corners, scale_factors);
}

// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_linear1d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_linear1d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_vec::schema> create_upsample_bilinear2d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bilinear2d_vec::name, upsample_bilinear2d_vec::overload_name)
      .typed<upsample_bilinear2d_vec::schema>();
}

// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_bilinear2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_bilinear2d_vec_typed_handle();
    return handle.call(input, output_size, align_corners, scale_factors);
}

// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_bilinear2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_bilinear2d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_vec::schema> create__upsample_bilinear2d_aa_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bilinear2d_aa_vec::name, _upsample_bilinear2d_aa_vec::overload_name)
      .typed<_upsample_bilinear2d_aa_vec::schema>();
}

// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bilinear2d_aa_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__upsample_bilinear2d_aa_vec_typed_handle();
    return handle.call(input, output_size, align_corners, scale_factors);
}

// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bilinear2d_aa_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create__upsample_bilinear2d_aa_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_vec::schema> create_upsample_trilinear3d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_trilinear3d_vec::name, upsample_trilinear3d_vec::overload_name)
      .typed<upsample_trilinear3d_vec::schema>();
}

// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_trilinear3d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_trilinear3d_vec_typed_handle();
    return handle.call(input, output_size, align_corners, scale_factors);
}

// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_trilinear3d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_trilinear3d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_vec::schema> create_upsample_bicubic2d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bicubic2d_vec::name, upsample_bicubic2d_vec::overload_name)
      .typed<upsample_bicubic2d_vec::schema>();
}

// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_bicubic2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_bicubic2d_vec_typed_handle();
    return handle.call(input, output_size, align_corners, scale_factors);
}

// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor upsample_bicubic2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_bicubic2d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_vec::schema> create__upsample_bicubic2d_aa_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bicubic2d_aa_vec::name, _upsample_bicubic2d_aa_vec::overload_name)
      .typed<_upsample_bicubic2d_aa_vec::schema>();
}

// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bicubic2d_aa_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__upsample_bicubic2d_aa_vec_typed_handle();
    return handle.call(input, output_size, align_corners, scale_factors);
}

// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
at::Tensor _upsample_bicubic2d_aa_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create__upsample_bicubic2d_aa_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
}

// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_vec::schema> create_upsample_nearest1d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_nearest1d_vec::name, upsample_nearest1d_vec::overload_name)
      .typed<upsample_nearest1d_vec::schema>();
}

// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest1d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_nearest1d_vec_typed_handle();
    return handle.call(input, output_size, scale_factors);
}

// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest1d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_nearest1d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_vec::schema> create__upsample_nearest_exact1d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_nearest_exact1d_vec::name, _upsample_nearest_exact1d_vec::overload_name)
      .typed<_upsample_nearest_exact1d_vec::schema>();
}

// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact1d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__upsample_nearest_exact1d_vec_typed_handle();
    return handle.call(input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact1d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create__upsample_nearest_exact1d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_vec::schema> create_upsample_nearest2d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_nearest2d_vec::name, upsample_nearest2d_vec::overload_name)
      .typed<upsample_nearest2d_vec::schema>();
}

// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_nearest2d_vec_typed_handle();
    return handle.call(input, output_size, scale_factors);
}

// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_nearest2d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_vec::schema> create__upsample_nearest_exact2d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_nearest_exact2d_vec::name, _upsample_nearest_exact2d_vec::overload_name)
      .typed<_upsample_nearest_exact2d_vec::schema>();
}

// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact2d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__upsample_nearest_exact2d_vec_typed_handle();
    return handle.call(input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact2d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create__upsample_nearest_exact2d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_vec::schema> create_upsample_nearest3d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_nearest3d_vec::name, upsample_nearest3d_vec::overload_name)
      .typed<upsample_nearest3d_vec::schema>();
}

// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest3d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_nearest3d_vec_typed_handle();
    return handle.call(input, output_size, scale_factors);
}

// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor upsample_nearest3d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create_upsample_nearest3d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_vec::schema> create__upsample_nearest_exact3d_vec_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_nearest_exact3d_vec::name, _upsample_nearest_exact3d_vec::overload_name)
      .typed<_upsample_nearest_exact3d_vec::schema>();
}

// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact3d_vec::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create__upsample_nearest_exact3d_vec_typed_handle();
    return handle.call(input, output_size, scale_factors);
}

// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
at::Tensor _upsample_nearest_exact3d_vec::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
    static auto handle = create__upsample_nearest_exact3d_vec_typed_handle();
    return handle.redispatch(dispatchKeySet, input, output_size, scale_factors);
}

// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_out::schema> create_upsample_linear1d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_linear1d_out::name, upsample_linear1d_out::overload_name)
      .typed<upsample_linear1d_out::schema>();
}

// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_linear1d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales, at::Tensor & out) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_linear1d_out_typed_handle();
    return handle.call(self, output_size, align_corners, scales, out);
}

// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_linear1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales, at::Tensor & out) {
    static auto handle = create_upsample_linear1d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, output_size, align_corners, scales, out);
}

// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
// Looks up the typed operator handle in the global dispatcher table.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d::schema> create_upsample_linear1d_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_linear1d::name, upsample_linear1d::overload_name)
      .typed<upsample_linear1d::schema>();
}

// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
at::Tensor upsample_linear1d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
    // Handle is resolved once on first call (thread-safe local static) and cached.
    static auto handle = create_upsample_linear1d_typed_handle();
    return handle.call(self, output_size, align_corners, scales);
}

// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
at::Tensor upsample_linear1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
    static auto handle = create_upsample_linear1d_typed_handle();
    return handle.redispatch(dispatchKeySet, self, output_size, align_corners, scales);
}

// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_backward_grad_input::schema> create_upsample_linear1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_linear1d_backward_grad_input::name, upsample_linear1d_backward_grad_input::overload_name)
      .typed<upsample_linear1d_backward_grad_input::schema>();
}

// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_linear1d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_linear1d_backward_grad_input_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales, grad_input);
}

// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_linear1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_linear1d_backward_grad_input_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales, grad_input);
}

// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_linear1d_backward::schema> create_upsample_linear1d_backward_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_linear1d_backward::name, upsample_linear1d_backward::overload_name)
      .typed<upsample_linear1d_backward::schema>();
}

// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
at::Tensor upsample_linear1d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_linear1d_backward_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales);
}

// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
at::Tensor upsample_linear1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_linear1d_backward_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales);
}

// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_out::schema> create_upsample_bilinear2d_out_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bilinear2d_out::name, upsample_bilinear2d_out::overload_name)
      .typed<upsample_bilinear2d_out::schema>();
}

// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bilinear2d_out_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bilinear2d_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d::schema> create_upsample_bilinear2d_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bilinear2d::name, upsample_bilinear2d::overload_name)
      .typed<upsample_bilinear2d::schema>();
}

// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bilinear2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bilinear2d_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bilinear2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bilinear2d_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_backward_grad_input::schema> create_upsample_bilinear2d_backward_grad_input_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bilinear2d_backward_grad_input::name, upsample_bilinear2d_backward_grad_input::overload_name)
      .typed<upsample_bilinear2d_backward_grad_input::schema>();
}

// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bilinear2d_backward_grad_input_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bilinear2d_backward_grad_input_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_backward::schema> create_upsample_bilinear2d_backward_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bilinear2d_backward::name, upsample_bilinear2d_backward::overload_name)
      .typed<upsample_bilinear2d_backward::schema>();
}

// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bilinear2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bilinear2d_backward_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bilinear2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bilinear2d_backward_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_out::schema> create__upsample_bilinear2d_aa_out_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bilinear2d_aa_out::name, _upsample_bilinear2d_aa_out::overload_name)
      .typed<_upsample_bilinear2d_aa_out::schema>();
}

// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bilinear2d_aa_out_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bilinear2d_aa_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa::schema> create__upsample_bilinear2d_aa_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bilinear2d_aa::name, _upsample_bilinear2d_aa::overload_name)
      .typed<_upsample_bilinear2d_aa::schema>();
}

// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bilinear2d_aa_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bilinear2d_aa_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_backward_grad_input::schema> create__upsample_bilinear2d_aa_backward_grad_input_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bilinear2d_aa_backward_grad_input::name, _upsample_bilinear2d_aa_backward_grad_input::overload_name)
      .typed<_upsample_bilinear2d_aa_backward_grad_input::schema>();
}

// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bilinear2d_aa_backward_grad_input_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bilinear2d_aa_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bilinear2d_aa_backward_grad_input_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bilinear2d_aa_backward::schema> create__upsample_bilinear2d_aa_backward_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bilinear2d_aa_backward::name, _upsample_bilinear2d_aa_backward::overload_name)
      .typed<_upsample_bilinear2d_aa_backward::schema>();
}

// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bilinear2d_aa_backward_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bilinear2d_aa_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bilinear2d_aa_backward_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_out::schema> create_upsample_bicubic2d_out_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bicubic2d_out::name, upsample_bicubic2d_out::overload_name)
      .typed<upsample_bicubic2d_out::schema>();
}

// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_bicubic2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bicubic2d_out_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_bicubic2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bicubic2d_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d::schema> create_upsample_bicubic2d_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bicubic2d::name, upsample_bicubic2d::overload_name)
      .typed<upsample_bicubic2d::schema>();
}

// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bicubic2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bicubic2d_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bicubic2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bicubic2d_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_backward_grad_input::schema> create_upsample_bicubic2d_backward_grad_input_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bicubic2d_backward_grad_input::name, upsample_bicubic2d_backward_grad_input::overload_name)
      .typed<upsample_bicubic2d_backward_grad_input::schema>();
}

// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_bicubic2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bicubic2d_backward_grad_input_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_bicubic2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bicubic2d_backward_grad_input_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bicubic2d_backward::schema> create_upsample_bicubic2d_backward_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bicubic2d_backward::name, upsample_bicubic2d_backward::overload_name)
      .typed<upsample_bicubic2d_backward::schema>();
}

// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bicubic2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_bicubic2d_backward_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_bicubic2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create_upsample_bicubic2d_backward_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_out::schema> create__upsample_bicubic2d_aa_out_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bicubic2d_aa_out::name, _upsample_bicubic2d_aa_out::overload_name)
      .typed<_upsample_bicubic2d_aa_out::schema>();
}

// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bicubic2d_aa_out_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bicubic2d_aa_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
}

// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa::schema> create__upsample_bicubic2d_aa_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bicubic2d_aa::name, _upsample_bicubic2d_aa::overload_name)
      .typed<_upsample_bicubic2d_aa::schema>();
}

// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bicubic2d_aa_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bicubic2d_aa_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_backward_grad_input::schema> create__upsample_bicubic2d_aa_backward_grad_input_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bicubic2d_aa_backward_grad_input::name, _upsample_bicubic2d_aa_backward_grad_input::overload_name)
      .typed<_upsample_bicubic2d_aa_backward_grad_input::schema>();
}

// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bicubic2d_aa_backward_grad_input_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_bicubic2d_aa_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bicubic2d_aa_backward_grad_input_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
}

// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_bicubic2d_aa_backward::schema> create__upsample_bicubic2d_aa_backward_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_upsample_bicubic2d_aa_backward::name, _upsample_bicubic2d_aa_backward::overload_name)
      .typed<_upsample_bicubic2d_aa_backward::schema>();
}

// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create__upsample_bicubic2d_aa_backward_typed_handle();
    return typed_op.call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_bicubic2d_aa_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Re-enter the dispatcher with an explicit key set; the typed handle is resolved once and cached.
    static const auto typed_op = create__upsample_bicubic2d_aa_backward_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}

// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_out::schema> create_upsample_trilinear3d_out_typed_handle() {
  // Resolve the operator schema by (name, overload) in the global dispatcher
  // and bind it to this op's compile-time signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_trilinear3d_out::name, upsample_trilinear3d_out::overload_name)
      .typed<upsample_trilinear3d_out::schema>();
}

// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_trilinear3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Dispatcher lookup happens only on first invocation; the handle is cached after that.
    static const auto typed_op = create_upsample_trilinear3d_out_typed_handle();
    return typed_op.call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}

// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & upsample_trilinear3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w, out);
}

// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d::schema> create_upsample_trilinear3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_trilinear3d::name, upsample_trilinear3d::overload_name)
      .typed<upsample_trilinear3d::schema>();
}

// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor upsample_trilinear3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_typed_handle();
    return op.call(self, output_size, align_corners, scales_d, scales_h, scales_w);
}

// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor upsample_trilinear3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w);
}

// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_backward_grad_input::schema> create_upsample_trilinear3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_trilinear3d_backward_grad_input::name, upsample_trilinear3d_backward_grad_input::overload_name)
      .typed<upsample_trilinear3d_backward_grad_input::schema>();
}

// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & upsample_trilinear3d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
}

// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & upsample_trilinear3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
}

// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_trilinear3d_backward::schema> create_upsample_trilinear3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_trilinear3d_backward::name, upsample_trilinear3d_backward::overload_name)
      .typed<upsample_trilinear3d_backward::schema>();
}

// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor upsample_trilinear3d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
}

// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor upsample_trilinear3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_trilinear3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
}

// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_out::schema> create_upsample_nearest1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_out::name, upsample_nearest1d_out::overload_name)
      .typed<upsample_nearest1d_out::schema>();
}

// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & upsample_nearest1d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_out_typed_handle();
    return op.call(self, output_size, scales, out);
}

// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & upsample_nearest1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales, out);
}

// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_out::schema> create__upsample_nearest_exact1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_out::name, _upsample_nearest_exact1d_out::overload_name)
      .typed<_upsample_nearest_exact1d_out::schema>();
}

// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & _upsample_nearest_exact1d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_out_typed_handle();
    return op.call(self, output_size, scales, out);
}

// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & _upsample_nearest_exact1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales, out);
}

// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d::schema> create_upsample_nearest1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d::name, upsample_nearest1d::overload_name)
      .typed<upsample_nearest1d::schema>();
}

// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor upsample_nearest1d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_typed_handle();
    return op.call(self, output_size, scales);
}

// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor upsample_nearest1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales);
}

// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d::schema> create__upsample_nearest_exact1d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d::name, _upsample_nearest_exact1d::overload_name)
      .typed<_upsample_nearest_exact1d::schema>();
}

// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor _upsample_nearest_exact1d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_typed_handle();
    return op.call(self, output_size, scales);
}

// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor _upsample_nearest_exact1d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales);
}

// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_backward_grad_input::schema> create_upsample_nearest1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_backward_grad_input::name, upsample_nearest1d_backward_grad_input::overload_name)
      .typed<upsample_nearest1d_backward_grad_input::schema>();
}

// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & upsample_nearest1d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales, grad_input);
}

// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & upsample_nearest1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
}

// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_backward_grad_input::schema> create__upsample_nearest_exact1d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_backward_grad_input::name, _upsample_nearest_exact1d_backward_grad_input::overload_name)
      .typed<_upsample_nearest_exact1d_backward_grad_input::schema>();
}

// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & _upsample_nearest_exact1d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales, grad_input);
}

// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & _upsample_nearest_exact1d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
}

// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest1d_backward::schema> create_upsample_nearest1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest1d_backward::name, upsample_nearest1d_backward::overload_name)
      .typed<upsample_nearest1d_backward::schema>();
}

// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor upsample_nearest1d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales);
}

// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor upsample_nearest1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales);
}

// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact1d_backward::schema> create__upsample_nearest_exact1d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact1d_backward::name, _upsample_nearest_exact1d_backward::overload_name)
      .typed<_upsample_nearest_exact1d_backward::schema>();
}

// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor _upsample_nearest_exact1d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales);
}

// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor _upsample_nearest_exact1d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact1d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales);
}

// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_out::schema> create_upsample_nearest2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest2d_out::name, upsample_nearest2d_out::overload_name)
      .typed<upsample_nearest2d_out::schema>();
}

// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & upsample_nearest2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_out_typed_handle();
    return op.call(self, output_size, scales_h, scales_w, out);
}

// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & upsample_nearest2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_out::schema> create__upsample_nearest_exact2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_out::name, _upsample_nearest_exact2d_out::overload_name)
      .typed<_upsample_nearest_exact2d_out::schema>();
}

// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & _upsample_nearest_exact2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_out_typed_handle();
    return op.call(self, output_size, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & _upsample_nearest_exact2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
}

// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d::schema> create_upsample_nearest2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest2d::name, upsample_nearest2d::overload_name)
      .typed<upsample_nearest2d::schema>();
}

// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor upsample_nearest2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_typed_handle();
    return op.call(self, output_size, scales_h, scales_w);
}

// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor upsample_nearest2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d::schema> create__upsample_nearest_exact2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d::name, _upsample_nearest_exact2d::overload_name)
      .typed<_upsample_nearest_exact2d::schema>();
}

// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor _upsample_nearest_exact2d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_typed_handle();
    return op.call(self, output_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor _upsample_nearest_exact2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_h, scales_w);
}

// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_backward_grad_input::schema> create_upsample_nearest2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest2d_backward_grad_input::name, upsample_nearest2d_backward_grad_input::overload_name)
      .typed<upsample_nearest2d_backward_grad_input::schema>();
}

// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & upsample_nearest2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & upsample_nearest2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_backward_grad_input::schema> create__upsample_nearest_exact2d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_backward_grad_input::name, _upsample_nearest_exact2d_backward_grad_input::overload_name)
      .typed<_upsample_nearest_exact2d_backward_grad_input::schema>();
}

// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & _upsample_nearest_exact2d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor & _upsample_nearest_exact2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
}

// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_backward::schema> create_upsample_nearest2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest2d_backward::name, upsample_nearest2d_backward::overload_name)
      .typed<upsample_nearest2d_backward::schema>();
}

// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor upsample_nearest2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor upsample_nearest2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact2d_backward::schema> create__upsample_nearest_exact2d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact2d_backward::name, _upsample_nearest_exact2d_backward::overload_name)
      .typed<_upsample_nearest_exact2d_backward::schema>();
}

// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor _upsample_nearest_exact2d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
// Like call(), but dispatches using the caller-supplied DispatchKeySet.
at::Tensor _upsample_nearest_exact2d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create__upsample_nearest_exact2d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w);
}

// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this op's schema in the global dispatcher (throws if unregistered)
// and returns a handle typed to the op's C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_out::schema> create_upsample_nearest3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d_out::name, upsample_nearest3d_out::overload_name)
      .typed<upsample_nearest3d_out::schema>();
}

// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// Dispatch entry point: forwards to the registered kernel via the dispatcher.
at::Tensor & upsample_nearest3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    // Handle is resolved once on first use; function-local static init is thread-safe.
    static auto op = create_upsample_nearest3d_out_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    
    static auto op = create_upsample_nearest3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_out::schema> create__upsample_nearest_exact3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d_out::name, _upsample_nearest_exact3d_out::overload_name)
      .typed<_upsample_nearest_exact3d_out::schema>();
}

// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create__upsample_nearest_exact3d_out_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create__upsample_nearest_exact3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
}

// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d::schema> create_upsample_nearest3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d::name, upsample_nearest3d::overload_name)
      .typed<upsample_nearest3d::schema>();
}

// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_upsample_nearest3d_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w);
}

// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_upsample_nearest3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w);
}

// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d::schema> create__upsample_nearest_exact3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d::name, _upsample_nearest_exact3d::overload_name)
      .typed<_upsample_nearest_exact3d::schema>();
}

// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact3d::call(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create__upsample_nearest_exact3d_typed_handle();
    return op.call(self, output_size, scales_d, scales_h, scales_w);
}

// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create__upsample_nearest_exact3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w);
}

// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_backward_grad_input::schema> create_upsample_nearest3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d_backward_grad_input::name, upsample_nearest3d_backward_grad_input::overload_name)
      .typed<upsample_nearest3d_backward_grad_input::schema>();
}

// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_nearest3d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_upsample_nearest3d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}

// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & upsample_nearest3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_upsample_nearest3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}

// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_backward_grad_input::schema> create__upsample_nearest_exact3d_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d_backward_grad_input::name, _upsample_nearest_exact3d_backward_grad_input::overload_name)
      .typed<_upsample_nearest_exact3d_backward_grad_input::schema>();
}

// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact3d_backward_grad_input::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create__upsample_nearest_exact3d_backward_grad_input_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}

// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & _upsample_nearest_exact3d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create__upsample_nearest_exact3d_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
}

// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest3d_backward::schema> create_upsample_nearest3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(upsample_nearest3d_backward::name, upsample_nearest3d_backward::overload_name)
      .typed<upsample_nearest3d_backward::schema>();
}

// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest3d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_upsample_nearest3d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}

// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor upsample_nearest3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_upsample_nearest3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}

// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<_upsample_nearest_exact3d_backward::schema> create__upsample_nearest_exact3d_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_upsample_nearest_exact3d_backward::name, _upsample_nearest_exact3d_backward::overload_name)
      .typed<_upsample_nearest_exact3d_backward::schema>();
}

// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact3d_backward::call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create__upsample_nearest_exact3d_backward_typed_handle();
    return op.call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}

// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
at::Tensor _upsample_nearest_exact3d_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create__upsample_nearest_exact3d_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
}

// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_backward_grad_input::schema> create_sigmoid_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_backward_grad_input::name, sigmoid_backward_grad_input::overload_name)
      .typed<sigmoid_backward_grad_input::schema>();
}

// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & sigmoid_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_sigmoid_backward_grad_input_typed_handle();
    return op.call(grad_output, output, grad_input);
}

// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & sigmoid_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_sigmoid_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, grad_input);
}

// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<sigmoid_backward::schema> create_sigmoid_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sigmoid_backward::name, sigmoid_backward::overload_name)
      .typed<sigmoid_backward::schema>();
}

// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor sigmoid_backward::call(const at::Tensor & grad_output, const at::Tensor & output) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_sigmoid_backward_typed_handle();
    return op.call(grad_output, output);
}

// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor sigmoid_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_sigmoid_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output);
}

// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<logit_backward_grad_input::schema> create_logit_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit_backward_grad_input::name, logit_backward_grad_input::overload_name)
      .typed<logit_backward_grad_input::schema>();
}

// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & logit_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_logit_backward_grad_input_typed_handle();
    return op.call(grad_output, self, eps, grad_input);
}

// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & logit_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_logit_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, eps, grad_input);
}

// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<logit_backward::schema> create_logit_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(logit_backward::name, logit_backward::overload_name)
      .typed<logit_backward::schema>();
}

// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
at::Tensor logit_backward::call(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_logit_backward_typed_handle();
    return op.call(grad_output, self, eps);
}

// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
at::Tensor logit_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_logit_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, eps);
}

// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<tanh_backward_grad_input::schema> create_tanh_backward_grad_input_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_backward_grad_input::name, tanh_backward_grad_input::overload_name)
      .typed<tanh_backward_grad_input::schema>();
}

// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & tanh_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_tanh_backward_grad_input_typed_handle();
    return op.call(grad_output, output, grad_input);
}

// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
at::Tensor & tanh_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_tanh_backward_grad_input_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, grad_input);
}

// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<tanh_backward::schema> create_tanh_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tanh_backward::name, tanh_backward::overload_name)
      .typed<tanh_backward::schema>();
}

// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor tanh_backward::call(const at::Tensor & grad_output, const at::Tensor & output) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_tanh_backward_typed_handle();
    return op.call(grad_output, output);
}

// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
at::Tensor tanh_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_tanh_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output);
}

// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose2d_out::schema> create_slow_conv_transpose2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_transpose2d_out::name, slow_conv_transpose2d_out::overload_name)
      .typed<slow_conv_transpose2d_out::schema>();
}

// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_transpose2d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_slow_conv_transpose2d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}

// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_transpose2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_slow_conv_transpose2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}

// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose2d::schema> create_slow_conv_transpose2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_transpose2d::name, slow_conv_transpose2d::overload_name)
      .typed<slow_conv_transpose2d::schema>();
}

// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
at::Tensor slow_conv_transpose2d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_slow_conv_transpose2d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

// aten::slow_conv_transpose2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt[2] dilation=1) -> Tensor
at::Tensor slow_conv_transpose2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_slow_conv_transpose2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose3d_out::schema> create_slow_conv_transpose3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_transpose3d_out::name, slow_conv_transpose3d_out::overload_name)
      .typed<slow_conv_transpose3d_out::schema>();
}

// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_transpose3d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_slow_conv_transpose3d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}

// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_transpose3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_slow_conv_transpose3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
}

// aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_transpose3d::schema> create_slow_conv_transpose3d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(slow_conv_transpose3d::name, slow_conv_transpose3d::overload_name)
      .typed<slow_conv_transpose3d::schema>();
}

// aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor
at::Tensor slow_conv_transpose3d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_slow_conv_transpose3d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

// aten::slow_conv_transpose3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt[3] dilation=1) -> Tensor
at::Tensor slow_conv_transpose3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_slow_conv_transpose3d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}

// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<thnn_conv2d_out::schema> create_thnn_conv2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(thnn_conv2d_out::name, thnn_conv2d_out::overload_name)
      .typed<thnn_conv2d_out::schema>();
}

// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & thnn_conv2d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_thnn_conv2d_out_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, out);
}

// aten::thnn_conv2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & thnn_conv2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_thnn_conv2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out);
}

// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
// One-time schema lookup: fetches the operator from the global dispatcher and
// wraps it in a statically-typed handle (cold path, hence C10_NOINLINE).
static C10_NOINLINE c10::TypedOperatorHandle<thnn_conv2d::schema> create_thnn_conv2d_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(thnn_conv2d::name, thnn_conv2d::overload_name)
      .typed<thnn_conv2d::schema>();
}

// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
at::Tensor thnn_conv2d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Handle is resolved once (thread-safe static init) and cached for reuse.
    static auto op = create_thnn_conv2d_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::thnn_conv2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0) -> Tensor
at::Tensor thnn_conv2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Dispatches with the caller-supplied key set rather than recomputing it.
    static auto op = create_thnn_conv2d_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_forward_output::schema> create__slow_conv2d_forward_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_slow_conv2d_forward_output::name, _slow_conv2d_forward_output::overload_name)
      .typed<_slow_conv2d_forward_output::schema>();
}

// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)
at::Tensor & _slow_conv2d_forward_output::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
    
    static auto op = create__slow_conv2d_forward_output_typed_handle();
    return op.call(self, weight, kernel_size, bias, stride, padding, output);
}

// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) output) -> Tensor(a!)
at::Tensor & _slow_conv2d_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create__slow_conv2d_forward_output_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output);
}

// aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_forward::schema> create__slow_conv2d_forward_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_slow_conv2d_forward::name, _slow_conv2d_forward::overload_name).typed<_slow_conv2d_forward::schema>();
}

// aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor
at::Tensor _slow_conv2d_forward::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create__slow_conv2d_forward_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::_slow_conv2d_forward(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding) -> Tensor
at::Tensor _slow_conv2d_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create__slow_conv2d_forward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_backward_grad_input::schema> create__slow_conv2d_backward_grad_input_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_slow_conv2d_backward_grad_input::name, _slow_conv2d_backward_grad_input::overload_name).typed<_slow_conv2d_backward_grad_input::schema>();
}

// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_grad_input::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create__slow_conv2d_backward_grad_input_typed_handle();
    return dispatch_handle.call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
}

// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_grad_input::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create__slow_conv2d_backward_grad_input_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
}

// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_backward_output_mask::schema> create__slow_conv2d_backward_output_mask_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_slow_conv2d_backward_output_mask::name, _slow_conv2d_backward_output_mask::overload_name).typed<_slow_conv2d_backward_output_mask::schema>();
}

// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create__slow_conv2d_backward_output_mask_typed_handle();
    return dispatch_handle.call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
}

// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create__slow_conv2d_backward_output_mask_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask);
}

// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_conv_depthwise2d_out::schema> create__conv_depthwise2d_out_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_conv_depthwise2d_out::name, _conv_depthwise2d_out::overload_name).typed<_conv_depthwise2d_out::schema>();
}

// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conv_depthwise2d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create__conv_depthwise2d_out_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conv_depthwise2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create__conv_depthwise2d_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_conv_depthwise2d::schema> create__conv_depthwise2d_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_conv_depthwise2d::name, _conv_depthwise2d::overload_name).typed<_conv_depthwise2d::schema>();
}

// aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
at::Tensor _conv_depthwise2d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create__conv_depthwise2d_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::_conv_depthwise2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias, SymInt[2] stride, SymInt[2] padding, SymInt[2] dilation) -> Tensor
at::Tensor _conv_depthwise2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create__conv_depthwise2d_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<conv_depthwise3d::schema> create_conv_depthwise3d_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(conv_depthwise3d::name, conv_depthwise3d::overload_name).typed<conv_depthwise3d::schema>();
}

// aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
at::Tensor conv_depthwise3d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_conv_depthwise3d_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::conv_depthwise3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation) -> Tensor
at::Tensor conv_depthwise3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_conv_depthwise3d_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d_out::schema> create_slow_conv3d_out_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv3d_out::name, slow_conv3d_out::overload_name).typed<slow_conv3d_out::schema>();
}

// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv3d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_slow_conv3d_out_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, out);
}

// aten::slow_conv3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_slow_conv3d_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out);
}

// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d::schema> create_slow_conv3d_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv3d::name, slow_conv3d::overload_name).typed<slow_conv3d::schema>();
}

// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
at::Tensor slow_conv3d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_slow_conv3d_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::slow_conv3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0) -> Tensor
at::Tensor slow_conv3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_slow_conv3d_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d_forward_output::schema> create_slow_conv3d_forward_output_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv3d_forward_output::name, slow_conv3d_forward_output::overload_name).typed<slow_conv3d_forward_output::schema>();
}

// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
at::Tensor & slow_conv3d_forward_output::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_slow_conv3d_forward_output_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, output);
}

// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
at::Tensor & slow_conv3d_forward_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_slow_conv3d_forward_output_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output);
}

// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv3d_forward::schema> create_slow_conv3d_forward_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv3d_forward::name, slow_conv3d_forward::overload_name).typed<slow_conv3d_forward::schema>();
}

// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
at::Tensor slow_conv3d_forward::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_slow_conv3d_forward_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding);
}

// aten::slow_conv3d_forward(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding) -> Tensor
at::Tensor slow_conv3d_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_slow_conv3d_forward_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding);
}

// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated2d::schema> create_slow_conv_dilated2d_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv_dilated2d::name, slow_conv_dilated2d::overload_name).typed<slow_conv_dilated2d::schema>();
}

// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
at::Tensor slow_conv_dilated2d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_slow_conv_dilated2d_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor
at::Tensor slow_conv_dilated2d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_slow_conv_dilated2d_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated3d::schema> create_slow_conv_dilated3d_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv_dilated3d::name, slow_conv_dilated3d::overload_name).typed<slow_conv_dilated3d::schema>();
}

// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
at::Tensor slow_conv_dilated3d::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_slow_conv_dilated3d_typed_handle();
    return dispatch_handle.call(self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor
at::Tensor slow_conv_dilated3d::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_slow_conv_dilated3d_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation);
}

// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<col2im_out::schema> create_col2im_out_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(col2im_out::name, col2im_out::overload_name).typed<col2im_out::schema>();
}

// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & col2im_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_col2im_out_typed_handle();
    return dispatch_handle.call(self, output_size, kernel_size, dilation, padding, stride, out);
}

// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & col2im_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_col2im_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride, out);
}

// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<col2im::schema> create_col2im_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(col2im::name, col2im::overload_name).typed<col2im::schema>();
}

// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
at::Tensor col2im::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_col2im_typed_handle();
    return dispatch_handle.call(self, output_size, kernel_size, dilation, padding, stride);
}

// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
at::Tensor col2im::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_col2im_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride);
}

// aten::column_stack(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<column_stack::schema> create_column_stack_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(column_stack::name, column_stack::overload_name).typed<column_stack::schema>();
}

// aten::column_stack(Tensor[] tensors) -> Tensor
at::Tensor column_stack::call(at::TensorList tensors) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_column_stack_typed_handle();
    return dispatch_handle.call(tensors);
}

// aten::column_stack(Tensor[] tensors) -> Tensor
at::Tensor column_stack::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_column_stack_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, tensors);
}

// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<column_stack_out::schema> create_column_stack_out_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(column_stack_out::name, column_stack_out::overload_name).typed<column_stack_out::schema>();
}

// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & column_stack_out::call(at::TensorList tensors, at::Tensor & out) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_column_stack_out_typed_handle();
    return dispatch_handle.call(tensors, out);
}

// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & column_stack_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_column_stack_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, tensors, out);
}

// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<im2col_out::schema> create_im2col_out_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(im2col_out::name, im2col_out::overload_name).typed<im2col_out::schema>();
}

// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & im2col_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_im2col_out_typed_handle();
    return dispatch_handle.call(self, kernel_size, dilation, padding, stride, out);
}

// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & im2col_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_im2col_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride, out);
}

// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<im2col::schema> create_im2col_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(im2col::name, im2col::overload_name).typed<im2col::schema>();
}

// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
at::Tensor im2col::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_im2col_typed_handle();
    return dispatch_handle.call(self, kernel_size, dilation, padding, stride);
}

// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
at::Tensor im2col::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_im2col_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride);
}

// aten::isfinite(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isfinite::schema> create_isfinite_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(isfinite::name, isfinite::overload_name).typed<isfinite::schema>();
}

// aten::isfinite(Tensor self) -> Tensor
at::Tensor isfinite::call(const at::Tensor & self) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_isfinite_typed_handle();
    return dispatch_handle.call(self);
}

// aten::isfinite(Tensor self) -> Tensor
at::Tensor isfinite::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_isfinite_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self);
}

// aten::isinf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isinf::schema> create_isinf_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(isinf::name, isinf::overload_name).typed<isinf::schema>();
}

// aten::isinf(Tensor self) -> Tensor
at::Tensor isinf::call(const at::Tensor & self) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_isinf_typed_handle();
    return dispatch_handle.call(self);
}

// aten::isinf(Tensor self) -> Tensor
at::Tensor isinf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_isinf_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self);
}

// aten::record_stream(Tensor(a!) self, Stream s) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<record_stream::schema> create_record_stream_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(record_stream::name, record_stream::overload_name).typed<record_stream::schema>();
}

// aten::record_stream(Tensor(a!) self, Stream s) -> ()
void record_stream::call(at::Tensor & self, at::Stream s) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_record_stream_typed_handle();
    return dispatch_handle.call(self, s);
}

// aten::record_stream(Tensor(a!) self, Stream s) -> ()
void record_stream::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Stream s) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_record_stream_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, s);
}

// aten::isposinf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isposinf::schema> create_isposinf_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(isposinf::name, isposinf::overload_name).typed<isposinf::schema>();
}

// aten::isposinf(Tensor self) -> Tensor
at::Tensor isposinf::call(const at::Tensor & self) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_isposinf_typed_handle();
    return dispatch_handle.call(self);
}

// aten::isposinf(Tensor self) -> Tensor
at::Tensor isposinf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_isposinf_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self);
}

// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isposinf_out::schema> create_isposinf_out_typed_handle() {
  // Look up the schema in the singleton dispatcher and bind its typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(isposinf_out::name, isposinf_out::overload_name).typed<isposinf_out::schema>();
}

// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isposinf_out::call(const at::Tensor & self, at::Tensor & out) {
    // Resolve the typed handle once (thread-safe static init) and reuse it.
    static auto dispatch_handle = create_isposinf_out_typed_handle();
    return dispatch_handle.call(self, out);
}

// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isposinf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Cached handle; re-enters dispatch with the caller-provided key set.
    static auto dispatch_handle = create_isposinf_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, out);
}

// aten::isneginf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<isneginf::schema> create_isneginf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isneginf::name, isneginf::overload_name)
      .typed<isneginf::schema>();
}

// aten::isneginf(Tensor self) -> Tensor
at::Tensor isneginf::call(const at::Tensor & self) {
    
    static auto op = create_isneginf_typed_handle();
    return op.call(self);
}

// aten::isneginf(Tensor self) -> Tensor
at::Tensor isneginf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_isneginf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<isneginf_out::schema> create_isneginf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isneginf_out::name, isneginf_out::overload_name)
      .typed<isneginf_out::schema>();
}

// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isneginf_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_isneginf_out_typed_handle();
    return op.call(self, out);
}

// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isneginf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_isneginf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_add_batch_dim::schema> create__add_batch_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_batch_dim::name, _add_batch_dim::overload_name)
      .typed<_add_batch_dim::schema>();
}

// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
at::Tensor _add_batch_dim::call(const at::Tensor & self, int64_t batch_dim, int64_t level) {
    
    static auto op = create__add_batch_dim_typed_handle();
    return op.call(self, batch_dim, level);
}

// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
at::Tensor _add_batch_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t batch_dim, int64_t level) {
    
    static auto op = create__add_batch_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, batch_dim, level);
}

// aten::_remove_batch_dim(Tensor self, int level, SymInt batch_size, int out_dim) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_remove_batch_dim::schema> create__remove_batch_dim_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_remove_batch_dim::name, _remove_batch_dim::overload_name)
      .typed<_remove_batch_dim::schema>();
}

// aten::_remove_batch_dim(Tensor self, int level, SymInt batch_size, int out_dim) -> Tensor
at::Tensor _remove_batch_dim::call(const at::Tensor & self, int64_t level, c10::SymInt batch_size, int64_t out_dim) {
    
    static auto op = create__remove_batch_dim_typed_handle();
    return op.call(self, level, batch_size, out_dim);
}

// aten::_remove_batch_dim(Tensor self, int level, SymInt batch_size, int out_dim) -> Tensor
at::Tensor _remove_batch_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, c10::SymInt batch_size, int64_t out_dim) {
    
    static auto op = create__remove_batch_dim_typed_handle();
    return op.redispatch(dispatchKeySet, self, level, batch_size, out_dim);
}

// aten::special_entr(Tensor self) -> Tensor
// NOTE(review): torchgen-generated dispatcher stubs for the special_* unary
// ops (and their .out variants) — do not hand-edit logic; regenerate via
// torchgen/gen.py. Per operator: create_*_typed_handle() resolves the schema
// from c10::Dispatcher, call()/redispatch() cache it in a function-local
// static and forward arguments unchanged.
static C10_NOINLINE c10::TypedOperatorHandle<special_entr::schema> create_special_entr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_entr::name, special_entr::overload_name)
      .typed<special_entr::schema>();
}

// aten::special_entr(Tensor self) -> Tensor
at::Tensor special_entr::call(const at::Tensor & self) {
    
    static auto op = create_special_entr_typed_handle();
    return op.call(self);
}

// aten::special_entr(Tensor self) -> Tensor
at::Tensor special_entr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_entr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_entr_out::schema> create_special_entr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_entr_out::name, special_entr_out::overload_name)
      .typed<special_entr_out::schema>();
}

// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_entr_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_entr_out_typed_handle();
    return op.call(self, out);
}

// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_entr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_entr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_ndtri(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtri::schema> create_special_ndtri_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtri::name, special_ndtri::overload_name)
      .typed<special_ndtri::schema>();
}

// aten::special_ndtri(Tensor self) -> Tensor
at::Tensor special_ndtri::call(const at::Tensor & self) {
    
    static auto op = create_special_ndtri_typed_handle();
    return op.call(self);
}

// aten::special_ndtri(Tensor self) -> Tensor
at::Tensor special_ndtri::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_ndtri_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtri_out::schema> create_special_ndtri_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtri_out::name, special_ndtri_out::overload_name)
      .typed<special_ndtri_out::schema>();
}

// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtri_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_ndtri_out_typed_handle();
    return op.call(self, out);
}

// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtri_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_ndtri_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_log_ndtr(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_log_ndtr::schema> create_special_log_ndtr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log_ndtr::name, special_log_ndtr::overload_name)
      .typed<special_log_ndtr::schema>();
}

// aten::special_log_ndtr(Tensor self) -> Tensor
at::Tensor special_log_ndtr::call(const at::Tensor & self) {
    
    static auto op = create_special_log_ndtr_typed_handle();
    return op.call(self);
}

// aten::special_log_ndtr(Tensor self) -> Tensor
at::Tensor special_log_ndtr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_log_ndtr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_log_ndtr_out::schema> create_special_log_ndtr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log_ndtr_out::name, special_log_ndtr_out::overload_name)
      .typed<special_log_ndtr_out::schema>();
}

// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log_ndtr_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_log_ndtr_out_typed_handle();
    return op.call(self, out);
}

// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log_ndtr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_log_ndtr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_expm1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_expm1::schema> create_special_expm1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_expm1::name, special_expm1::overload_name)
      .typed<special_expm1::schema>();
}

// aten::special_expm1(Tensor self) -> Tensor
at::Tensor special_expm1::call(const at::Tensor & self) {
    
    static auto op = create_special_expm1_typed_handle();
    return op.call(self);
}

// aten::special_expm1(Tensor self) -> Tensor
at::Tensor special_expm1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_expm1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_expm1_out::schema> create_special_expm1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_expm1_out::name, special_expm1_out::overload_name)
      .typed<special_expm1_out::schema>();
}

// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expm1_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_expm1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expm1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_expm1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_exp2(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_exp2::schema> create_special_exp2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_exp2::name, special_exp2::overload_name)
      .typed<special_exp2::schema>();
}

// aten::special_exp2(Tensor self) -> Tensor
at::Tensor special_exp2::call(const at::Tensor & self) {
    
    static auto op = create_special_exp2_typed_handle();
    return op.call(self);
}

// aten::special_exp2(Tensor self) -> Tensor
at::Tensor special_exp2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_exp2_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_exp2_out::schema> create_special_exp2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_exp2_out::name, special_exp2_out::overload_name)
      .typed<special_exp2_out::schema>();
}

// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_exp2_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_exp2_out_typed_handle();
    return op.call(self, out);
}

// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_exp2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_exp2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_psi(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_psi::schema> create_special_psi_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_psi::name, special_psi::overload_name)
      .typed<special_psi::schema>();
}

// aten::special_psi(Tensor self) -> Tensor
at::Tensor special_psi::call(const at::Tensor & self) {
    
    static auto op = create_special_psi_typed_handle();
    return op.call(self);
}

// aten::special_psi(Tensor self) -> Tensor
at::Tensor special_psi::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_psi_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_psi_out::schema> create_special_psi_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_psi_out::name, special_psi_out::overload_name)
      .typed<special_psi_out::schema>();
}

// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_psi_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_psi_out_typed_handle();
    return op.call(self, out);
}

// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_psi_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_psi_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_digamma(Tensor self) -> Tensor
// NOTE(review): torchgen-generated dispatcher stubs (special_digamma through
// special_ndtr, each with an .out variant) — do not hand-edit logic;
// regenerate via torchgen/gen.py. Per operator: create_*_typed_handle()
// resolves the schema from c10::Dispatcher, call()/redispatch() cache it in
// a function-local static and forward arguments unchanged.
static C10_NOINLINE c10::TypedOperatorHandle<special_digamma::schema> create_special_digamma_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_digamma::name, special_digamma::overload_name)
      .typed<special_digamma::schema>();
}

// aten::special_digamma(Tensor self) -> Tensor
at::Tensor special_digamma::call(const at::Tensor & self) {
    
    static auto op = create_special_digamma_typed_handle();
    return op.call(self);
}

// aten::special_digamma(Tensor self) -> Tensor
at::Tensor special_digamma::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_digamma_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_digamma_out::schema> create_special_digamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_digamma_out::name, special_digamma_out::overload_name)
      .typed<special_digamma_out::schema>();
}

// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_digamma_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_digamma_out_typed_handle();
    return op.call(self, out);
}

// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_digamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_digamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_gammaln(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaln::schema> create_special_gammaln_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammaln::name, special_gammaln::overload_name)
      .typed<special_gammaln::schema>();
}

// aten::special_gammaln(Tensor self) -> Tensor
at::Tensor special_gammaln::call(const at::Tensor & self) {
    
    static auto op = create_special_gammaln_typed_handle();
    return op.call(self);
}

// aten::special_gammaln(Tensor self) -> Tensor
at::Tensor special_gammaln::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_gammaln_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaln_out::schema> create_special_gammaln_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammaln_out::name, special_gammaln_out::overload_name)
      .typed<special_gammaln_out::schema>();
}

// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaln_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_gammaln_out_typed_handle();
    return op.call(self, out);
}

// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaln_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_gammaln_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_erf(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erf::schema> create_special_erf_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erf::name, special_erf::overload_name)
      .typed<special_erf::schema>();
}

// aten::special_erf(Tensor self) -> Tensor
at::Tensor special_erf::call(const at::Tensor & self) {
    
    static auto op = create_special_erf_typed_handle();
    return op.call(self);
}

// aten::special_erf(Tensor self) -> Tensor
at::Tensor special_erf::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_erf_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erf_out::schema> create_special_erf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erf_out::name, special_erf_out::overload_name)
      .typed<special_erf_out::schema>();
}

// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erf_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erf_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_erfc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erfc::schema> create_special_erfc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfc::name, special_erfc::overload_name)
      .typed<special_erfc::schema>();
}

// aten::special_erfc(Tensor self) -> Tensor
at::Tensor special_erfc::call(const at::Tensor & self) {
    
    static auto op = create_special_erfc_typed_handle();
    return op.call(self);
}

// aten::special_erfc(Tensor self) -> Tensor
at::Tensor special_erfc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_erfc_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erfc_out::schema> create_special_erfc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfc_out::name, special_erfc_out::overload_name)
      .typed<special_erfc_out::schema>();
}

// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfc_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erfc_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erfc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_erfcx(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erfcx::schema> create_special_erfcx_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfcx::name, special_erfcx::overload_name)
      .typed<special_erfcx::schema>();
}

// aten::special_erfcx(Tensor self) -> Tensor
at::Tensor special_erfcx::call(const at::Tensor & self) {
    
    static auto op = create_special_erfcx_typed_handle();
    return op.call(self);
}

// aten::special_erfcx(Tensor self) -> Tensor
at::Tensor special_erfcx::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_erfcx_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erfcx_out::schema> create_special_erfcx_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfcx_out::name, special_erfcx_out::overload_name)
      .typed<special_erfcx_out::schema>();
}

// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfcx_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erfcx_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfcx_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erfcx_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_erfinv(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_erfinv::schema> create_special_erfinv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfinv::name, special_erfinv::overload_name)
      .typed<special_erfinv::schema>();
}

// aten::special_erfinv(Tensor self) -> Tensor
at::Tensor special_erfinv::call(const at::Tensor & self) {
    
    static auto op = create_special_erfinv_typed_handle();
    return op.call(self);
}

// aten::special_erfinv(Tensor self) -> Tensor
at::Tensor special_erfinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_erfinv_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_erfinv_out::schema> create_special_erfinv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_erfinv_out::name, special_erfinv_out::overload_name)
      .typed<special_erfinv_out::schema>();
}

// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfinv_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erfinv_out_typed_handle();
    return op.call(self, out);
}

// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_erfinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_erfinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_ndtr(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtr::schema> create_special_ndtr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtr::name, special_ndtr::overload_name)
      .typed<special_ndtr::schema>();
}

// aten::special_ndtr(Tensor self) -> Tensor
at::Tensor special_ndtr::call(const at::Tensor & self) {
    
    static auto op = create_special_ndtr_typed_handle();
    return op.call(self);
}

// aten::special_ndtr(Tensor self) -> Tensor
at::Tensor special_ndtr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_special_ndtr_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_ndtr_out::schema> create_special_ndtr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_ndtr_out::name, special_ndtr_out::overload_name)
      .typed<special_ndtr_out::schema>();
}

// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtr_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_ndtr_out_typed_handle();
    return op.call(self, out);
}

// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_ndtr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_ndtr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
// NOTE(review): torchgen-generated dispatcher stubs for the special_xlog1py
// overload family (tensor/tensor, scalar-self, scalar-other, and the three
// matching .out variants) — do not hand-edit logic; regenerate via
// torchgen/gen.py. Per overload: create_*_typed_handle() resolves the schema
// from c10::Dispatcher, call()/redispatch() cache it in a function-local
// static and forward arguments unchanged.
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py::schema> create_special_xlog1py_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py::name, special_xlog1py::overload_name)
      .typed<special_xlog1py::schema>();
}

// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlog1py::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_special_xlog1py_typed_handle();
    return op.call(self, other);
}

// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlog1py::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_special_xlog1py_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_self_scalar::schema> create_special_xlog1py_self_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_self_scalar::name, special_xlog1py_self_scalar::overload_name)
      .typed<special_xlog1py_self_scalar::schema>();
}

// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlog1py_self_scalar::call(const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_special_xlog1py_self_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlog1py_self_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    
    static auto op = create_special_xlog1py_self_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_other_scalar::schema> create_special_xlog1py_other_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_other_scalar::name, special_xlog1py_other_scalar::overload_name)
      .typed<special_xlog1py_other_scalar::schema>();
}

// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlog1py_other_scalar::call(const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_special_xlog1py_other_scalar_typed_handle();
    return op.call(self, other);
}

// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlog1py_other_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    
    static auto op = create_special_xlog1py_other_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_out::schema> create_special_xlog1py_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_out::name, special_xlog1py_out::overload_name)
      .typed<special_xlog1py_out::schema>();
}

// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_xlog1py_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_xlog1py_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_self_scalar_out::schema> create_special_xlog1py_self_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_self_scalar_out::name, special_xlog1py_self_scalar_out::overload_name)
      .typed<special_xlog1py_self_scalar_out::schema>();
}

// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_self_scalar_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_xlog1py_self_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_self_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_xlog1py_self_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlog1py_other_scalar_out::schema> create_special_xlog1py_other_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_xlog1py_other_scalar_out::name, special_xlog1py_other_scalar_out::overload_name)
      .typed<special_xlog1py_other_scalar_out::schema>();
}

// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_other_scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_special_xlog1py_other_scalar_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlog1py_other_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    
    static auto op = create_special_xlog1py_other_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy::schema> create_special_xlogy_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_xlogy::name, special_xlogy::overload_name)
      .typed<special_xlogy::schema>();
}

// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlogy::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_xlogy_typed_handle();
    return handle.call(self, other);
}

// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
at::Tensor special_xlogy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_xlogy_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_self_scalar::schema> create_special_xlogy_self_scalar_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_xlogy_self_scalar::name, special_xlogy_self_scalar::overload_name)
      .typed<special_xlogy_self_scalar::schema>();
}

// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlogy_self_scalar::call(const at::Scalar & self, const at::Tensor & other) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_xlogy_self_scalar_typed_handle();
    return handle.call(self, other);
}

// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_xlogy_self_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_xlogy_self_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_other_scalar::schema> create_special_xlogy_other_scalar_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_xlogy_other_scalar::name, special_xlogy_other_scalar::overload_name)
      .typed<special_xlogy_other_scalar::schema>();
}

// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlogy_other_scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_xlogy_other_scalar_typed_handle();
    return handle.call(self, other);
}

// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_xlogy_other_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_xlogy_other_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_out::schema> create_special_xlogy_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_xlogy_out::name, special_xlogy_out::overload_name)
      .typed<special_xlogy_out::schema>();
}

// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_xlogy_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_xlogy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_self_scalar_out::schema> create_special_xlogy_self_scalar_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_xlogy_self_scalar_out::name, special_xlogy_self_scalar_out::overload_name)
      .typed<special_xlogy_self_scalar_out::schema>();
}

// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_self_scalar_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_xlogy_self_scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_self_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_xlogy_self_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_xlogy_other_scalar_out::schema> create_special_xlogy_other_scalar_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_xlogy_other_scalar_out::name, special_xlogy_other_scalar_out::overload_name)
      .typed<special_xlogy_other_scalar_out::schema>();
}

// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_other_scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_xlogy_other_scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_xlogy_other_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_xlogy_other_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_zeta(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta::schema> create_special_zeta_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_zeta::name, special_zeta::overload_name)
      .typed<special_zeta::schema>();
}

// aten::special_zeta(Tensor self, Tensor other) -> Tensor
at::Tensor special_zeta::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_zeta_typed_handle();
    return handle.call(self, other);
}

// aten::special_zeta(Tensor self, Tensor other) -> Tensor
at::Tensor special_zeta::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_zeta_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_self_scalar::schema> create_special_zeta_self_scalar_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_zeta_self_scalar::name, special_zeta_self_scalar::overload_name)
      .typed<special_zeta_self_scalar::schema>();
}

// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_zeta_self_scalar::call(const at::Scalar & self, const at::Tensor & other) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_zeta_self_scalar_typed_handle();
    return handle.call(self, other);
}

// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
at::Tensor special_zeta_self_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_zeta_self_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_other_scalar::schema> create_special_zeta_other_scalar_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_zeta_other_scalar::name, special_zeta_other_scalar::overload_name)
      .typed<special_zeta_other_scalar::schema>();
}

// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_zeta_other_scalar::call(const at::Tensor & self, const at::Scalar & other) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_zeta_other_scalar_typed_handle();
    return handle.call(self, other);
}

// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
at::Tensor special_zeta_other_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_zeta_other_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_out::schema> create_special_zeta_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_zeta_out::name, special_zeta_out::overload_name)
      .typed<special_zeta_out::schema>();
}

// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_zeta_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_zeta_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_self_scalar_out::schema> create_special_zeta_self_scalar_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_zeta_self_scalar_out::name, special_zeta_self_scalar_out::overload_name)
      .typed<special_zeta_self_scalar_out::schema>();
}

// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_self_scalar_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_zeta_self_scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_self_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_zeta_self_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_zeta_other_scalar_out::schema> create_special_zeta_other_scalar_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_zeta_other_scalar_out::name, special_zeta_other_scalar_out::overload_name)
      .typed<special_zeta_other_scalar_out::schema>();
}

// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_other_scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_zeta_other_scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_zeta_other_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_zeta_other_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_i0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i0::schema> create_special_i0_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i0::name, special_i0::overload_name)
      .typed<special_i0::schema>();
}

// aten::special_i0(Tensor self) -> Tensor
at::Tensor special_i0::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i0_typed_handle();
    return handle.call(self);
}

// aten::special_i0(Tensor self) -> Tensor
at::Tensor special_i0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i0_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i0_out::schema> create_special_i0_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i0_out::name, special_i0_out::overload_name)
      .typed<special_i0_out::schema>();
}

// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i0_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i0_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_i0e(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i0e::schema> create_special_i0e_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i0e::name, special_i0e::overload_name)
      .typed<special_i0e::schema>();
}

// aten::special_i0e(Tensor self) -> Tensor
at::Tensor special_i0e::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i0e_typed_handle();
    return handle.call(self);
}

// aten::special_i0e(Tensor self) -> Tensor
at::Tensor special_i0e::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i0e_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i0e_out::schema> create_special_i0e_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i0e_out::name, special_i0e_out::overload_name)
      .typed<special_i0e_out::schema>();
}

// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0e_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i0e_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i0e_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i0e_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_i1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i1::schema> create_special_i1_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i1::name, special_i1::overload_name)
      .typed<special_i1::schema>();
}

// aten::special_i1(Tensor self) -> Tensor
at::Tensor special_i1::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i1_typed_handle();
    return handle.call(self);
}

// aten::special_i1(Tensor self) -> Tensor
at::Tensor special_i1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i1_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i1_out::schema> create_special_i1_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i1_out::name, special_i1_out::overload_name)
      .typed<special_i1_out::schema>();
}

// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i1_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i1_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_i1e(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_i1e::schema> create_special_i1e_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i1e::name, special_i1e::overload_name)
      .typed<special_i1e::schema>();
}

// aten::special_i1e(Tensor self) -> Tensor
at::Tensor special_i1e::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i1e_typed_handle();
    return handle.call(self);
}

// aten::special_i1e(Tensor self) -> Tensor
at::Tensor special_i1e::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i1e_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_i1e_out::schema> create_special_i1e_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_i1e_out::name, special_i1e_out::overload_name)
      .typed<special_i1e_out::schema>();
}

// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1e_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_i1e_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_i1e_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_i1e_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_logit(Tensor self, float? eps=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_logit::schema> create_special_logit_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_logit::name, special_logit::overload_name)
      .typed<special_logit::schema>();
}

// aten::special_logit(Tensor self, float? eps=None) -> Tensor
at::Tensor special_logit::call(const at::Tensor & self, ::std::optional<double> eps) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_logit_typed_handle();
    return handle.call(self, eps);
}

// aten::special_logit(Tensor self, float? eps=None) -> Tensor
at::Tensor special_logit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> eps) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_logit_typed_handle();
    return handle.redispatch(dispatchKeySet, self, eps);
}

// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_logit_out::schema> create_special_logit_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_logit_out::name, special_logit_out::overload_name)
      .typed<special_logit_out::schema>();
}

// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_logit_out::call(const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_logit_out_typed_handle();
    return handle.call(self, eps, out);
}

// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_logit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_logit_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, eps, out);
}

// aten::special_polygamma(int n, Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_polygamma::schema> create_special_polygamma_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_polygamma::name, special_polygamma::overload_name)
      .typed<special_polygamma::schema>();
}

// aten::special_polygamma(int n, Tensor self) -> Tensor
at::Tensor special_polygamma::call(int64_t n, const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_polygamma_typed_handle();
    return handle.call(n, self);
}

// aten::special_polygamma(int n, Tensor self) -> Tensor
at::Tensor special_polygamma::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_polygamma_typed_handle();
    return handle.redispatch(dispatchKeySet, n, self);
}

// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_polygamma_out::schema> create_special_polygamma_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_polygamma_out::name, special_polygamma_out::overload_name)
      .typed<special_polygamma_out::schema>();
}

// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_polygamma_out::call(int64_t n, const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_polygamma_out_typed_handle();
    return handle.call(n, self, out);
}

// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_polygamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_polygamma_out_typed_handle();
    return handle.redispatch(dispatchKeySet, n, self, out);
}

// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_logsumexp::schema> create_special_logsumexp_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_logsumexp::name, special_logsumexp::overload_name)
      .typed<special_logsumexp::schema>();
}

// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor special_logsumexp::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_logsumexp_typed_handle();
    return handle.call(self, dim, keepdim);
}

// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
at::Tensor special_logsumexp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_logsumexp_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim);
}

// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_logsumexp_out::schema> create_special_logsumexp_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_logsumexp_out::name, special_logsumexp_out::overload_name)
      .typed<special_logsumexp_out::schema>();
}

// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_logsumexp_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_logsumexp_out_typed_handle();
    return handle.call(self, dim, keepdim, out);
}

// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_logsumexp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_logsumexp_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, keepdim, out);
}

// aten::special_expit(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_expit::schema> create_special_expit_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_expit::name, special_expit::overload_name)
      .typed<special_expit::schema>();
}

// aten::special_expit(Tensor self) -> Tensor
at::Tensor special_expit::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_expit_typed_handle();
    return handle.call(self);
}

// aten::special_expit(Tensor self) -> Tensor
at::Tensor special_expit::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_expit_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_expit_out::schema> create_special_expit_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_expit_out::name, special_expit_out::overload_name)
      .typed<special_expit_out::schema>();
}

// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expit_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_expit_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_expit_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_expit_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_sinc(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_sinc::schema> create_special_sinc_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_sinc::name, special_sinc::overload_name)
      .typed<special_sinc::schema>();
}

// aten::special_sinc(Tensor self) -> Tensor
at::Tensor special_sinc::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_sinc_typed_handle();
    return handle.call(self);
}

// aten::special_sinc(Tensor self) -> Tensor
at::Tensor special_sinc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_sinc_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_sinc_out::schema> create_special_sinc_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_sinc_out::name, special_sinc_out::overload_name)
      .typed<special_sinc_out::schema>();
}

// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_sinc_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_sinc_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_sinc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_sinc_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_round::schema> create_special_round_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_round::name, special_round::overload_name)
      .typed<special_round::schema>();
}

// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
at::Tensor special_round::call(const at::Tensor & self, int64_t decimals) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_round_typed_handle();
    return handle.call(self, decimals);
}

// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
at::Tensor special_round::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_round_typed_handle();
    return handle.redispatch(dispatchKeySet, self, decimals);
}

// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_round_out::schema> create_special_round_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_round_out::name, special_round_out::overload_name)
      .typed<special_round_out::schema>();
}

// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_round_out::call(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_round_out_typed_handle();
    return handle.call(self, decimals, out);
}

// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_round_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_round_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, decimals, out);
}

// aten::special_log1p(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_log1p::schema> create_special_log1p_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_log1p::name, special_log1p::overload_name)
      .typed<special_log1p::schema>();
}

// aten::special_log1p(Tensor self) -> Tensor
at::Tensor special_log1p::call(const at::Tensor & self) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_log1p_typed_handle();
    return handle.call(self);
}

// aten::special_log1p(Tensor self) -> Tensor
at::Tensor special_log1p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_log1p_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_log1p_out::schema> create_special_log1p_out_typed_handle() {
  // Resolve the operator schema once and return a handle typed to this signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(special_log1p_out::name, special_log1p_out::overload_name)
      .typed<special_log1p_out::schema>();
}

// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log1p_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is cached in a function-local static; resolved on first use.
    static auto handle = create_special_log1p_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_log1p_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-provided key set.
    static auto handle = create_special_log1p_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for special_log_softmax and
// special_gammainc (.out and functional). Pattern: resolve schema once via a
// cold create_* helper, cache the typed handle in a local static, forward.
// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_log_softmax::schema> create_special_log_softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_log_softmax::name, special_log_softmax::overload_name)
      .typed<special_log_softmax::schema>();
}

// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor special_log_softmax::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_special_log_softmax_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
at::Tensor special_log_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_special_log_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_gammainc_out::schema> create_special_gammainc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammainc_out::name, special_gammainc_out::overload_name)
      .typed<special_gammainc_out::schema>();
}

// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammainc_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_gammainc_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammainc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_gammainc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_gammainc::schema> create_special_gammainc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammainc::name, special_gammainc::overload_name)
      .typed<special_gammainc::schema>();
}

// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammainc::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_special_gammainc_typed_handle();
    return op.call(self, other);
}

// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammainc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_special_gammainc_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): torchgen-generated dispatcher stubs for aten::special_gammaincc
// (.out and functional). Schema lookup is done once (cold helper); the typed
// handle is cached in a function-local static by each forwarder.
// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaincc_out::schema> create_special_gammaincc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammaincc_out::name, special_gammaincc_out::overload_name)
      .typed<special_gammaincc_out::schema>();
}

// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaincc_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_gammaincc_out_typed_handle();
    return op.call(self, other, out);
}

// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_gammaincc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_special_gammaincc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_gammaincc::schema> create_special_gammaincc_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_gammaincc::name, special_gammaincc::overload_name)
      .typed<special_gammaincc::schema>();
}

// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammaincc::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_special_gammaincc_typed_handle();
    return op.call(self, other);
}

// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
at::Tensor special_gammaincc::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_special_gammaincc_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// NOTE(review): torchgen-generated dispatcher stubs for aten::special_multigammaln
// (functional and .out). Same cached-typed-handle forwarding pattern as the rest
// of this shard.
// aten::special_multigammaln(Tensor self, int p) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_multigammaln::schema> create_special_multigammaln_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_multigammaln::name, special_multigammaln::overload_name)
      .typed<special_multigammaln::schema>();
}

// aten::special_multigammaln(Tensor self, int p) -> Tensor
at::Tensor special_multigammaln::call(const at::Tensor & self, int64_t p) {
    
    static auto op = create_special_multigammaln_typed_handle();
    return op.call(self, p);
}

// aten::special_multigammaln(Tensor self, int p) -> Tensor
at::Tensor special_multigammaln::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) {
    
    static auto op = create_special_multigammaln_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_multigammaln_out::schema> create_special_multigammaln_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_multigammaln_out::name, special_multigammaln_out::overload_name)
      .typed<special_multigammaln_out::schema>();
}

// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_multigammaln_out::call(const at::Tensor & self, int64_t p, at::Tensor & out) {
    
    static auto op = create_special_multigammaln_out_typed_handle();
    return op.call(self, p, out);
}

// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_multigammaln_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
    
    static auto op = create_special_multigammaln_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for aten::special_softmax.
// Resolve-once helper + statically cached typed handle; arguments are forwarded
// unchanged to the dispatcher.
// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_softmax::schema> create_special_softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_softmax::name, special_softmax::overload_name)
      .typed<special_softmax::schema>();
}

// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor special_softmax::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_special_softmax_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor special_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_special_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// NOTE(review): torchgen-generated dispatcher stubs for the 1-D FFT family
// fft_fft / fft_ifft (functional and .out). Each group: a cold create_* helper
// resolving the schema, and call/redispatch forwarders caching the typed handle
// in a function-local static.
// aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft::schema> create_fft_fft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fft::name, fft_fft::overload_name)
      .typed<fft_fft::schema>();
}

// aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_fft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_fft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_fft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_fft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft_out::schema> create_fft_fft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fft_out::name, fft_fft_out::overload_name)
      .typed<fft_fft_out::schema>();
}

// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_fft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_fft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft::schema> create_fft_ifft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft::name, fft_ifft::overload_name)
      .typed<fft_ifft::schema>();
}

// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ifft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ifft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ifft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ifft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft_out::schema> create_fft_ifft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft_out::name, fft_ifft_out::overload_name)
      .typed<fft_ifft_out::schema>();
}

// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ifft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ifft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the real 1-D FFT family
// fft_rfft / fft_irfft (functional and .out). Same resolve-once + statically
// cached typed-handle forwarding pattern as the rest of this shard.
// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft::schema> create_fft_rfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfft::name, fft_rfft::overload_name)
      .typed<fft_rfft::schema>();
}

// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_rfft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_rfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_rfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_rfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft_out::schema> create_fft_rfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfft_out::name, fft_rfft_out::overload_name)
      .typed<fft_rfft_out::schema>();
}

// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_rfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_rfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft::schema> create_fft_irfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft::name, fft_irfft::overload_name)
      .typed<fft_irfft::schema>();
}

// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_irfft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_irfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_irfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_irfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft_out::schema> create_fft_irfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft_out::name, fft_irfft_out::overload_name)
      .typed<fft_irfft_out::schema>();
}

// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_irfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_irfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the Hermitian 1-D FFT
// family fft_hfft / fft_ihfft (functional and .out). Resolve-once helper plus
// statically cached typed-handle forwarders, as throughout this shard.
// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft::schema> create_fft_hfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_hfft::name, fft_hfft::overload_name)
      .typed<fft_hfft::schema>();
}

// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_hfft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_hfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_hfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_hfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft_out::schema> create_fft_hfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_hfft_out::name, fft_hfft_out::overload_name)
      .typed<fft_hfft_out::schema>();
}

// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_hfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_hfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft::schema> create_fft_ihfft_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ihfft::name, fft_ihfft::overload_name)
      .typed<fft_ihfft::schema>();
}

// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ihfft::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ihfft_typed_handle();
    return op.call(self, n, dim, norm);
}

// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
at::Tensor fft_ihfft::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ihfft_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm);
}

// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft_out::schema> create_fft_ihfft_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ihfft_out::name, fft_ihfft_out::overload_name)
      .typed<fft_ihfft_out::schema>();
}

// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft_out::call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ihfft_out_typed_handle();
    return op.call(self, n, dim, norm, out);
}

// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ihfft_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, n, dim, norm, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the 2-D FFT family
// fft_fft2 / fft_ifft2 (functional and .out). Identical resolve-once +
// cached-handle forwarding pattern; only the argument lists differ
// (size list `s` and dim list instead of scalar n/dim).
// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft2::schema> create_fft_fft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fft2::name, fft_fft2::overload_name)
      .typed<fft_fft2::schema>();
}

// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_fft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_fft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_fft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_fft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_fft2_out::schema> create_fft_fft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_fft2_out::name, fft_fft2_out::overload_name)
      .typed<fft_fft2_out::schema>();
}

// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_fft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_fft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft2::schema> create_fft_ifft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft2::name, fft_ifft2::overload_name)
      .typed<fft_ifft2::schema>();
}

// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ifft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ifft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ifft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_ifft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifft2_out::schema> create_fft_ifft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_ifft2_out::name, fft_ifft2_out::overload_name)
      .typed<fft_ifft2_out::schema>();
}

// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ifft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_ifft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// NOTE(review): torchgen-generated dispatcher stubs for the real 2-D FFT family
// fft_rfft2 / fft_irfft2 (functional and .out). Same resolve-once helper +
// statically cached typed-handle forwarding pattern as the rest of this shard.
// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft2::schema> create_fft_rfft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfft2::name, fft_rfft2::overload_name)
      .typed<fft_rfft2::schema>();
}

// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_rfft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_rfft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_rfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_rfft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfft2_out::schema> create_fft_rfft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_rfft2_out::name, fft_rfft2_out::overload_name)
      .typed<fft_rfft2_out::schema>();
}

// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_rfft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_rfft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft2::schema> create_fft_irfft2_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft2::name, fft_irfft2::overload_name)
      .typed<fft_irfft2::schema>();
}

// aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_irfft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_irfft2_typed_handle();
    return op.call(self, s, dim, norm);
}

// aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_irfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
    
    static auto op = create_fft_irfft2_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfft2_out::schema> create_fft_irfft2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fft_irfft2_out::name, fft_irfft2_out::overload_name)
      .typed<fft_irfft2_out::schema>();
}

// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_irfft2_out_typed_handle();
    return op.call(self, s, dim, norm, out);
}

// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
    
    static auto op = create_fft_irfft2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_hfft2.
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft2::schema> create_fft_hfft2_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_hfft2::name, fft_hfft2::overload_name).typed<fft_hfft2::schema>();
}

// aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_hfft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_hfft2_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_hfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_hfft2_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_hfft2.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfft2_out::schema> create_fft_hfft2_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_hfft2_out::name, fft_hfft2_out::overload_name).typed<fft_hfft2_out::schema>();
}

// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_hfft2_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_hfft2_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_ihfft2.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft2::schema> create_fft_ihfft2_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ihfft2::name, fft_ihfft2::overload_name).typed<fft_ihfft2::schema>();
}

// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ihfft2::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_ihfft2_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
at::Tensor fft_ihfft2::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_ihfft2_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_ihfft2.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfft2_out::schema> create_fft_ihfft2_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ihfft2_out::name, fft_ihfft2_out::overload_name).typed<fft_ihfft2_out::schema>();
}

// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft2_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_ihfft2_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfft2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_ihfft2_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_fftn.
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftn::schema> create_fft_fftn_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_fftn::name, fft_fftn::overload_name).typed<fft_fftn::schema>();
}

// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_fftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_fftn_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_fftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_fftn_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_fftn.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftn_out::schema> create_fft_fftn_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_fftn_out::name, fft_fftn_out::overload_name).typed<fft_fftn_out::schema>();
}

// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_fftn_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_fftn_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_ifftn.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifftn::schema> create_fft_ifftn_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ifftn::name, fft_ifftn::overload_name).typed<fft_ifftn::schema>();
}

// aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_ifftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_ifftn_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_ifftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_ifftn_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_ifftn.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifftn_out::schema> create_fft_ifftn_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ifftn_out::name, fft_ifftn_out::overload_name).typed<fft_ifftn_out::schema>();
}

// aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_ifftn_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ifftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_ifftn_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_rfftn.
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftn::schema> create_fft_rfftn_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_rfftn::name, fft_rfftn::overload_name).typed<fft_rfftn::schema>();
}

// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_rfftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_rfftn_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_rfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_rfftn_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_rfftn.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftn_out::schema> create_fft_rfftn_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_rfftn_out::name, fft_rfftn_out::overload_name).typed<fft_rfftn_out::schema>();
}

// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_rfftn_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_rfftn_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_irfftn.
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfftn::schema> create_fft_irfftn_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_irfftn::name, fft_irfftn::overload_name).typed<fft_irfftn::schema>();
}

// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_irfftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_irfftn_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_irfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_irfftn_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_irfftn.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_irfftn_out::schema> create_fft_irfftn_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_irfftn_out::name, fft_irfftn_out::overload_name).typed<fft_irfftn_out::schema>();
}

// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_irfftn_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_irfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_irfftn_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_hfftn.
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfftn::schema> create_fft_hfftn_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_hfftn::name, fft_hfftn::overload_name).typed<fft_hfftn::schema>();
}

// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_hfftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_hfftn_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_hfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_hfftn_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_hfftn.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_hfftn_out::schema> create_fft_hfftn_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_hfftn_out::name, fft_hfftn_out::overload_name).typed<fft_hfftn_out::schema>();
}

// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_hfftn_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_hfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_hfftn_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_ihfftn.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfftn::schema> create_fft_ihfftn_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ihfftn::name, fft_ihfftn::overload_name).typed<fft_ihfftn::schema>();
}

// aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_ihfftn::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_ihfftn_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm);
}

// aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
at::Tensor fft_ihfftn::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm) {
  static auto handle = create_fft_ihfftn_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm);
}

// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_ihfftn.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ihfftn_out::schema> create_fft_ihfftn_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ihfftn_out::name, fft_ihfftn_out::overload_name).typed<fft_ihfftn_out::schema>();
}

// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfftn_out::call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_ihfftn_out_typed_handle();  // resolved once, cached
  return handle.call(self, s, dim, norm, out);
}

// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_ihfftn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
  static auto handle = create_fft_ihfftn_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, s, dim, norm, out);
}

// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_fftfreq.
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftfreq::schema> create_fft_fftfreq_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_fftfreq::name, fft_fftfreq::overload_name).typed<fft_fftfreq::schema>();
}

// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_fftfreq::call(int64_t n, double d, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_fft_fftfreq_typed_handle();  // resolved once, cached
  return handle.call(n, d, dtype, layout, device, pin_memory);
}

// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_fftfreq::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_fft_fftfreq_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory);
}

// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_fftfreq.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftfreq_out::schema> create_fft_fftfreq_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_fftfreq_out::name, fft_fftfreq_out::overload_name).typed<fft_fftfreq_out::schema>();
}

// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftfreq_out::call(int64_t n, double d, at::Tensor & out) {
  static auto handle = create_fft_fftfreq_out_typed_handle();  // resolved once, cached
  return handle.call(n, d, out);
}

// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_fftfreq_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
  static auto handle = create_fft_fftfreq_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, n, d, out);
}

// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_rfftfreq.
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftfreq::schema> create_fft_rfftfreq_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_rfftfreq::name, fft_rfftfreq::overload_name).typed<fft_rfftfreq::schema>();
}

// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_rfftfreq::call(int64_t n, double d, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_fft_rfftfreq_typed_handle();  // resolved once, cached
  return handle.call(n, d, dtype, layout, device, pin_memory);
}

// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor fft_rfftfreq::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
  static auto handle = create_fft_rfftfreq_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory);
}

// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::fft_rfftfreq.out.
static C10_NOINLINE c10::TypedOperatorHandle<fft_rfftfreq_out::schema> create_fft_rfftfreq_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_rfftfreq_out::name, fft_rfftfreq_out::overload_name).typed<fft_rfftfreq_out::schema>();
}

// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftfreq_out::call(int64_t n, double d, at::Tensor & out) {
  static auto handle = create_fft_rfftfreq_out_typed_handle();  // resolved once, cached
  return handle.call(n, d, out);
}

// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fft_rfftfreq_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
  static auto handle = create_fft_rfftfreq_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, n, d, out);
}

// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_fftshift.
static C10_NOINLINE c10::TypedOperatorHandle<fft_fftshift::schema> create_fft_fftshift_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_fftshift::name, fft_fftshift::overload_name).typed<fft_fftshift::schema>();
}

// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_fftshift::call(const at::Tensor & self, at::OptionalIntArrayRef dim) {
  static auto handle = create_fft_fftshift_typed_handle();  // resolved once, cached
  return handle.call(self, dim);
}

// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_fftshift::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim) {
  static auto handle = create_fft_fftshift_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::fft_ifftshift.
static C10_NOINLINE c10::TypedOperatorHandle<fft_ifftshift::schema> create_fft_ifftshift_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(fft_ifftshift::name, fft_ifftshift::overload_name).typed<fft_ifftshift::schema>();
}

// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_ifftshift::call(const at::Tensor & self, at::OptionalIntArrayRef dim) {
  static auto handle = create_fft_ifftshift_typed_handle();  // resolved once, cached
  return handle.call(self, dim);
}

// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
at::Tensor fft_ifftshift::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim) {
  static auto handle = create_fft_ifftshift_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, dim);
}

// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
// Lazily resolves the typed dispatcher handle for aten::linalg_cholesky_ex.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky_ex::schema> create_linalg_cholesky_ex_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_cholesky_ex::name, linalg_cholesky_ex::overload_name).typed<linalg_cholesky_ex::schema>();
}

// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex::call(const at::Tensor & self, bool upper, bool check_errors) {
  static auto handle = create_linalg_cholesky_ex_typed_handle();  // resolved once, cached
  return handle.call(self, upper, check_errors);
}

// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors) {
  static auto handle = create_linalg_cholesky_ex_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, upper, check_errors);
}

// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
// Lazily resolves the typed dispatcher handle for aten::linalg_cholesky_ex.L.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky_ex_L::schema> create_linalg_cholesky_ex_L_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_cholesky_ex_L::name, linalg_cholesky_ex_L::overload_name).typed<linalg_cholesky_ex_L::schema>();
}

// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_L::call(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
  static auto handle = create_linalg_cholesky_ex_L_typed_handle();  // resolved once, cached
  return handle.call(self, upper, check_errors, L, info);
}

// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_L::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
  static auto handle = create_linalg_cholesky_ex_L_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, upper, check_errors, L, info);
}

// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::linalg_cholesky.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky::schema> create_linalg_cholesky_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_cholesky::name, linalg_cholesky::overload_name).typed<linalg_cholesky::schema>();
}

// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
at::Tensor linalg_cholesky::call(const at::Tensor & self, bool upper) {
  static auto handle = create_linalg_cholesky_typed_handle();  // resolved once, cached
  return handle.call(self, upper);
}

// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
at::Tensor linalg_cholesky::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper) {
  static auto handle = create_linalg_cholesky_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, upper);
}

// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::linalg_cholesky.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cholesky_out::schema> create_linalg_cholesky_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_cholesky_out::name, linalg_cholesky_out::overload_name).typed<linalg_cholesky_out::schema>();
}

// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cholesky_out::call(const at::Tensor & self, bool upper, at::Tensor & out) {
  static auto handle = create_linalg_cholesky_out_typed_handle();  // resolved once, cached
  return handle.call(self, upper, out);
}

// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cholesky_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
  static auto handle = create_linalg_cholesky_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, upper, out);
}

// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
// Lazily resolves the typed dispatcher handle for aten::linalg_cross.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cross::schema> create_linalg_cross_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_cross::name, linalg_cross::overload_name).typed<linalg_cross::schema>();
}

// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
at::Tensor linalg_cross::call(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
  static auto handle = create_linalg_cross_typed_handle();  // resolved once, cached
  return handle.call(self, other, dim);
}

// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
at::Tensor linalg_cross::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim) {
  static auto handle = create_linalg_cross_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, other, dim);
}

// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the typed dispatcher handle for aten::linalg_cross.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cross_out::schema> create_linalg_cross_out_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_cross_out::name, linalg_cross_out::overload_name).typed<linalg_cross_out::schema>();
}

// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cross_out::call(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
  static auto handle = create_linalg_cross_out_typed_handle();  // resolved once, cached
  return handle.call(self, other, dim, out);
}

// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_cross_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
  static auto handle = create_linalg_cross_out_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, self, other, dim, out);
}

// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
// Lazily resolves the typed dispatcher handle for aten::linalg_lu_factor.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor::schema> create_linalg_lu_factor_typed_handle() {
  return c10::Dispatcher::singleton().findSchemaOrThrow(linalg_lu_factor::name, linalg_lu_factor::overload_name).typed<linalg_lu_factor::schema>();
}

// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor::call(const at::Tensor & A, bool pivot) {
  static auto handle = create_linalg_lu_factor_typed_handle();  // resolved once, cached
  return handle.call(A, pivot);
}

// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot) {
  static auto handle = create_linalg_lu_factor_typed_handle();  // resolved once, cached
  return handle.redispatch(dispatchKeySet, A, pivot);
}

// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor_out::schema> create_linalg_lu_factor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_factor_out::name, linalg_lu_factor_out::overload_name)
      .typed<linalg_lu_factor_out::schema>();
}

// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out::call(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
    
    static auto op = create_linalg_lu_factor_out_typed_handle();
    return op.call(A, pivot, LU, pivots);
}

// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
    
    static auto op = create_linalg_lu_factor_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, LU, pivots);
}

// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor_ex::schema> create_linalg_lu_factor_ex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_factor_ex::name, linalg_lu_factor_ex::overload_name)
      .typed<linalg_lu_factor_ex::schema>();
}

// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex::call(const at::Tensor & A, bool pivot, bool check_errors) {
    
    static auto op = create_linalg_lu_factor_ex_typed_handle();
    return op.call(A, pivot, check_errors);
}

// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors) {
    
    static auto op = create_linalg_lu_factor_ex_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, check_errors);
}

// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_factor_ex_out::schema> create_linalg_lu_factor_ex_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_factor_ex_out::name, linalg_lu_factor_ex_out::overload_name)
      .typed<linalg_lu_factor_ex_out::schema>();
}

// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out::call(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
    
    static auto op = create_linalg_lu_factor_ex_out_typed_handle();
    return op.call(A, pivot, check_errors, LU, pivots, info);
}

// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
    
    static auto op = create_linalg_lu_factor_ex_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, check_errors, LU, pivots, info);
}

// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu::schema> create_linalg_lu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu::name, linalg_lu::overload_name)
      .typed<linalg_lu::schema>();
}

// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu::call(const at::Tensor & A, bool pivot) {
    
    static auto op = create_linalg_lu_typed_handle();
    return op.call(A, pivot);
}

// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot) {
    
    static auto op = create_linalg_lu_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot);
}

// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_out::schema> create_linalg_lu_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_out::name, linalg_lu_out::overload_name)
      .typed<linalg_lu_out::schema>();
}

// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out::call(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
    
    static auto op = create_linalg_lu_out_typed_handle();
    return op.call(A, pivot, P, L, U);
}

// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
    
    static auto op = create_linalg_lu_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, pivot, P, L, U);
}

// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_solve::schema> create_linalg_lu_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_solve::name, linalg_lu_solve::overload_name)
      .typed<linalg_lu_solve::schema>();
}

// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor linalg_lu_solve::call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
    
    static auto op = create_linalg_lu_solve_typed_handle();
    return op.call(LU, pivots, B, left, adjoint);
}

// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor linalg_lu_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
    
    static auto op = create_linalg_lu_solve_typed_handle();
    return op.redispatch(dispatchKeySet, LU, pivots, B, left, adjoint);
}

// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lu_solve_out::schema> create_linalg_lu_solve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lu_solve_out::name, linalg_lu_solve_out::overload_name)
      .typed<linalg_lu_solve_out::schema>();
}

// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor & linalg_lu_solve_out::call(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
    
    static auto op = create_linalg_lu_solve_out_typed_handle();
    return op.call(LU, pivots, B, left, adjoint, out);
}

// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor & linalg_lu_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
    
    static auto op = create_linalg_lu_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, LU, pivots, B, left, adjoint, out);
}

// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_det::schema> create__linalg_det_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_det::name, _linalg_det::overload_name)
      .typed<_linalg_det::schema>();
}

// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det::call(const at::Tensor & A) {
    
    static auto op = create__linalg_det_typed_handle();
    return op.call(A);
}

// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    
    static auto op = create__linalg_det_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_det_result::schema> create__linalg_det_result_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_det_result::name, _linalg_det_result::overload_name)
      .typed<_linalg_det_result::schema>();
}

// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_result::call(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
    
    static auto op = create__linalg_det_result_typed_handle();
    return op.call(A, result, LU, pivots);
}

// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_result::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
    
    static auto op = create__linalg_det_result_typed_handle();
    return op.redispatch(dispatchKeySet, A, result, LU, pivots);
}

// aten::linalg_det(Tensor A) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_det::schema> create_linalg_det_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_det::name, linalg_det::overload_name)
      .typed<linalg_det::schema>();
}

// aten::linalg_det(Tensor A) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor linalg_det::call(const at::Tensor & A) {
    
    static auto op = create_linalg_det_typed_handle();
    return op.call(A);
}

// aten::linalg_det(Tensor A) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor linalg_det::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    
    static auto op = create_linalg_det_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_det_out::schema> create_linalg_det_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_det_out::name, linalg_det_out::overload_name)
      .typed<linalg_det_out::schema>();
}

// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor & linalg_det_out::call(const at::Tensor & A, at::Tensor & out) {
    
    static auto op = create_linalg_det_out_typed_handle();
    return op.call(A, out);
}

// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor & linalg_det_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
    
    static auto op = create_linalg_det_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, out);
}

// aten::det(Tensor self) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<det::schema> create_det_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(det::name, det::overload_name)
      .typed<det::schema>();
}

// aten::det(Tensor self) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor det::call(const at::Tensor & self) {
    
    static auto op = create_det_typed_handle();
    return op.call(self);
}

// aten::det(Tensor self) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor det::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_det_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor_ex::schema> create_linalg_ldl_factor_ex_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor_ex::name, linalg_ldl_factor_ex::overload_name)
      .typed<linalg_ldl_factor_ex::schema>();
}

// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex::call(const at::Tensor & self, bool hermitian, bool check_errors) {
    
    static auto op = create_linalg_ldl_factor_ex_typed_handle();
    return op.call(self, hermitian, check_errors);
}

// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors) {
    
    static auto op = create_linalg_ldl_factor_ex_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian, check_errors);
}

// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor_ex_out::schema> create_linalg_ldl_factor_ex_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor_ex_out::name, linalg_ldl_factor_ex_out::overload_name)
      .typed<linalg_ldl_factor_ex_out::schema>();
}

// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out::call(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
    
    static auto op = create_linalg_ldl_factor_ex_out_typed_handle();
    return op.call(self, hermitian, check_errors, LD, pivots, info);
}

// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
    
    static auto op = create_linalg_ldl_factor_ex_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian, check_errors, LD, pivots, info);
}

// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor::schema> create_linalg_ldl_factor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor::name, linalg_ldl_factor::overload_name)
      .typed<linalg_ldl_factor::schema>();
}

// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor::call(const at::Tensor & self, bool hermitian) {
    
    static auto op = create_linalg_ldl_factor_typed_handle();
    return op.call(self, hermitian);
}

// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian) {
    
    static auto op = create_linalg_ldl_factor_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian);
}

// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_factor_out::schema> create_linalg_ldl_factor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_factor_out::name, linalg_ldl_factor_out::overload_name)
      .typed<linalg_ldl_factor_out::schema>();
}

// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out::call(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
    
    static auto op = create_linalg_ldl_factor_out_typed_handle();
    return op.call(self, hermitian, LD, pivots);
}

// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
    
    static auto op = create_linalg_ldl_factor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, hermitian, LD, pivots);
}

// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_solve::schema> create_linalg_ldl_solve_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_solve::name, linalg_ldl_solve::overload_name)
      .typed<linalg_ldl_solve::schema>();
}

// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor linalg_ldl_solve::call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
    
    static auto op = create_linalg_ldl_solve_typed_handle();
    return op.call(LD, pivots, B, hermitian);
}

// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor linalg_ldl_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
    
    static auto op = create_linalg_ldl_solve_typed_handle();
    return op.redispatch(dispatchKeySet, LD, pivots, B, hermitian);
}

// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_ldl_solve_out::schema> create_linalg_ldl_solve_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_ldl_solve_out::name, linalg_ldl_solve_out::overload_name)
      .typed<linalg_ldl_solve_out::schema>();
}

// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor & linalg_ldl_solve_out::call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_ldl_solve_out_typed_handle();
    return op.call(LD, pivots, B, hermitian, out);
}

// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor & linalg_ldl_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_ldl_solve_out_typed_handle();
    return op.redispatch(dispatchKeySet, LD, pivots, B, hermitian, out);
}

// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lstsq::schema> create_linalg_lstsq_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lstsq::name, linalg_lstsq::overload_name)
      .typed<linalg_lstsq::schema>();
}

// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq::call(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_lstsq_typed_handle();
    return op.call(self, b, rcond, driver);
}

// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_lstsq_typed_handle();
    return op.redispatch(dispatchKeySet, self, b, rcond, driver);
}

// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_lstsq_out::schema> create_linalg_lstsq_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_lstsq_out::name, linalg_lstsq_out::overload_name)
      .typed<linalg_lstsq_out::schema>();
}

// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out::call(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
    
    static auto op = create_linalg_lstsq_out_typed_handle();
    return op.call(self, b, rcond, driver, solution, residuals, rank, singular_values);
}

// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
    
    static auto op = create_linalg_lstsq_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, b, rcond, driver, solution, residuals, rank, singular_values);
}

// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matmul::schema> create_linalg_matmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matmul::name, linalg_matmul::overload_name)
      .typed<linalg_matmul::schema>();
}

// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor linalg_matmul::call(const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_linalg_matmul_typed_handle();
    return op.call(self, other);
}

// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor linalg_matmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    
    static auto op = create_linalg_matmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other);
}

// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matmul_out::schema> create_linalg_matmul_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matmul_out::name, linalg_matmul_out::overload_name)
      .typed<linalg_matmul_out::schema>();
}

// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor & linalg_matmul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_linalg_matmul_out_typed_handle();
    return op.call(self, other, out);
}

// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor & linalg_matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create_linalg_matmul_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vecdot::schema> create_linalg_vecdot_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vecdot::name, linalg_vecdot::overload_name)
      .typed<linalg_vecdot::schema>();
}

// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor linalg_vecdot::call(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
    
    static auto op = create_linalg_vecdot_typed_handle();
    return op.call(x, y, dim);
}

// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor linalg_vecdot::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim) {
    
    static auto op = create_linalg_vecdot_typed_handle();
    return op.redispatch(dispatchKeySet, x, y, dim);
}

// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vecdot_out::schema> create_linalg_vecdot_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vecdot_out::name, linalg_vecdot_out::overload_name)
      .typed<linalg_vecdot_out::schema>();
}

// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor & linalg_vecdot_out::call(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
    
    static auto op = create_linalg_vecdot_out_typed_handle();
    return op.call(x, y, dim, out);
}

// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor & linalg_vecdot_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
    
    static auto op = create_linalg_vecdot_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, y, dim, out);
}

// aten::linalg_matrix_exp(Tensor self) -> Tensor
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_exp::schema> create_linalg_matrix_exp_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_exp::name, linalg_matrix_exp::overload_name)
      .typed<linalg_matrix_exp::schema>();
}

// aten::linalg_matrix_exp(Tensor self) -> Tensor
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
at::Tensor linalg_matrix_exp::call(const at::Tensor & self) {
    
    static auto op = create_linalg_matrix_exp_typed_handle();
    return op.call(self);
}

// aten::linalg_matrix_exp(Tensor self) -> Tensor
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
at::Tensor linalg_matrix_exp::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_linalg_matrix_exp_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_slogdet::schema> create__linalg_slogdet_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_slogdet::name, _linalg_slogdet::overload_name)
      .typed<_linalg_slogdet::schema>();
}

// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet::call(const at::Tensor & A) {
    
    static auto op = create__linalg_slogdet_typed_handle();
    return op.call(A);
}

// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    
    static auto op = create__linalg_slogdet_typed_handle();
    return op.redispatch(dispatchKeySet, A);
}

// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
// Looks up the dispatcher record by (name, overload_name) — throwing if it is
// not registered — and returns a handle typed to this operator's schema.
// C10_NOINLINE keeps this one-time setup out of the call paths below.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_slogdet_sign::schema> create__linalg_slogdet_sign_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_slogdet_sign::name, _linalg_slogdet_sign::overload_name)
      .typed<_linalg_slogdet_sign::schema>();
}

// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
// Public entry point: dispatches through a handle cached in a thread-safe
// function-local static (created once, on first use).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_sign::call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
    
    static auto op = create__linalg_slogdet_sign_typed_handle();
    return op.call(A, sign, logabsdet, LU, pivots);
}

// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
// Variant of call() that forwards the caller-supplied c10::DispatchKeySet to
// the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_sign::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
    
    static auto op = create__linalg_slogdet_sign_typed_handle();
    return op.redispatch(dispatchKeySet, A, sign, logabsdet, LU, pivots);
}

// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
// Resolves the typed dispatcher handle for aten::linalg_slogdet.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_slogdet::schema> create_linalg_slogdet_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_slogdet::name, linalg_slogdet::overload_name).typed<linalg_slogdet::schema>();
}

// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet::call(const at::Tensor & A) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_slogdet_typed_handle();
    return handle.call(A);
}

// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    static auto handle = create_linalg_slogdet_typed_handle();
    return handle.redispatch(dispatchKeySet, A);
}

// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
// Resolves the typed dispatcher handle for aten::linalg_slogdet.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_slogdet_out::schema> create_linalg_slogdet_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_slogdet_out::name, linalg_slogdet_out::overload_name).typed<linalg_slogdet_out::schema>();
}

// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out::call(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_slogdet_out_typed_handle();
    return handle.call(A, sign, logabsdet);
}

// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
    static auto handle = create_linalg_slogdet_out_typed_handle();
    return handle.redispatch(dispatchKeySet, A, sign, logabsdet);
}

// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
// Resolves the typed dispatcher handle for aten::slogdet.
static C10_NOINLINE c10::TypedOperatorHandle<slogdet::schema> create_slogdet_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slogdet::name, slogdet::overload_name).typed<slogdet::schema>();
}

// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> slogdet::call(const at::Tensor & self) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_slogdet_typed_handle();
    return handle.call(self);
}

// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
::std::tuple<at::Tensor,at::Tensor> slogdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_slogdet_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
// Resolves the typed dispatcher handle for aten::slogdet.out.
static C10_NOINLINE c10::TypedOperatorHandle<slogdet_out::schema> create_slogdet_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slogdet_out::name, slogdet_out::overload_name).typed<slogdet_out::schema>();
}

// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> slogdet_out::call(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_slogdet_out_typed_handle();
    return handle.call(self, sign, logabsdet);
}

// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
::std::tuple<at::Tensor &,at::Tensor &> slogdet_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
    static auto handle = create_slogdet_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, sign, logabsdet);
}

// aten::logdet(Tensor self) -> Tensor
// Resolves the typed dispatcher handle for aten::logdet.
static C10_NOINLINE c10::TypedOperatorHandle<logdet::schema> create_logdet_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(logdet::name, logdet::overload_name).typed<logdet::schema>();
}

// aten::logdet(Tensor self) -> Tensor
at::Tensor logdet::call(const at::Tensor & self) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_logdet_typed_handle();
    return handle.call(self);
}

// aten::logdet(Tensor self) -> Tensor
at::Tensor logdet::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_logdet_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
// Resolves the typed dispatcher handle for aten::linalg_eig.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eig::schema> create_linalg_eig_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eig::name, linalg_eig::overload_name).typed<linalg_eig::schema>();
}

// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eig::call(const at::Tensor & self) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eig_typed_handle();
    return handle.call(self);
}

// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eig::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_linalg_eig_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
// Resolves the typed dispatcher handle for aten::linalg_eig.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eig_out::schema> create_linalg_eig_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eig_out::name, linalg_eig_out::overload_name).typed<linalg_eig_out::schema>();
}

// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out::call(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eig_out_typed_handle();
    return handle.call(self, eigenvalues, eigenvectors);
}

// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
    static auto handle = create_linalg_eig_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, eigenvalues, eigenvectors);
}

// aten::_linalg_eigvals(Tensor self) -> Tensor
// Resolves the typed dispatcher handle for aten::_linalg_eigvals.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_eigvals::schema> create__linalg_eigvals_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_linalg_eigvals::name, _linalg_eigvals::overload_name).typed<_linalg_eigvals::schema>();
}

// aten::_linalg_eigvals(Tensor self) -> Tensor
at::Tensor _linalg_eigvals::call(const at::Tensor & self) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create__linalg_eigvals_typed_handle();
    return handle.call(self);
}

// aten::_linalg_eigvals(Tensor self) -> Tensor
at::Tensor _linalg_eigvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create__linalg_eigvals_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::linalg_eigvals(Tensor self) -> Tensor
// Resolves the typed dispatcher handle for aten::linalg_eigvals.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvals::schema> create_linalg_eigvals_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eigvals::name, linalg_eigvals::overload_name).typed<linalg_eigvals::schema>();
}

// aten::linalg_eigvals(Tensor self) -> Tensor
at::Tensor linalg_eigvals::call(const at::Tensor & self) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eigvals_typed_handle();
    return handle.call(self);
}

// aten::linalg_eigvals(Tensor self) -> Tensor
at::Tensor linalg_eigvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_linalg_eigvals_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::linalg_eigvals.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvals_out::schema> create_linalg_eigvals_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eigvals_out::name, linalg_eigvals_out::overload_name).typed<linalg_eigvals_out::schema>();
}

// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_eigvals_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eigvals_out_typed_handle();
    return handle.call(self, out);
}

// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_eigvals_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_linalg_eigvals_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
// Resolves the typed dispatcher handle for aten::_linalg_eigh.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_eigh::schema> create__linalg_eigh_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_linalg_eigh::name, _linalg_eigh::overload_name).typed<_linalg_eigh::schema>();
}

// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh::call(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create__linalg_eigh_typed_handle();
    return handle.call(A, UPLO, compute_v);
}

// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
    static auto handle = create__linalg_eigh_typed_handle();
    return handle.redispatch(dispatchKeySet, A, UPLO, compute_v);
}

// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
// Resolves the typed dispatcher handle for aten::_linalg_eigh.eigenvalues.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_eigh_eigenvalues::schema> create__linalg_eigh_eigenvalues_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_linalg_eigh_eigenvalues::name, _linalg_eigh_eigenvalues::overload_name).typed<_linalg_eigh_eigenvalues::schema>();
}

// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_eigenvalues::call(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create__linalg_eigh_eigenvalues_typed_handle();
    return handle.call(A, UPLO, compute_v, eigenvalues, eigenvectors);
}

// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_eigenvalues::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
    static auto handle = create__linalg_eigh_eigenvalues_typed_handle();
    return handle.redispatch(dispatchKeySet, A, UPLO, compute_v, eigenvalues, eigenvectors);
}

// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
// Resolves the typed dispatcher handle for aten::linalg_eigh.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigh::schema> create_linalg_eigh_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eigh::name, linalg_eigh::overload_name).typed<linalg_eigh::schema>();
}

// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eigh::call(const at::Tensor & self, c10::string_view UPLO) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eigh_typed_handle();
    return handle.call(self, UPLO);
}

// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
::std::tuple<at::Tensor,at::Tensor> linalg_eigh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO) {
    static auto handle = create_linalg_eigh_typed_handle();
    return handle.redispatch(dispatchKeySet, self, UPLO);
}

// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
// Resolves the typed dispatcher handle for aten::linalg_eigh.eigvals.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigh_eigvals::schema> create_linalg_eigh_eigvals_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eigh_eigvals::name, linalg_eigh_eigvals::overload_name).typed<linalg_eigh_eigvals::schema>();
}

// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_eigvals::call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eigh_eigvals_typed_handle();
    return handle.call(self, UPLO, eigvals, eigvecs);
}

// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_eigvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
    static auto handle = create_linalg_eigh_eigvals_typed_handle();
    return handle.redispatch(dispatchKeySet, self, UPLO, eigvals, eigvecs);
}

// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
// Resolves the typed dispatcher handle for aten::linalg_eigvalsh.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvalsh::schema> create_linalg_eigvalsh_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eigvalsh::name, linalg_eigvalsh::overload_name).typed<linalg_eigvalsh::schema>();
}

// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
at::Tensor linalg_eigvalsh::call(const at::Tensor & self, c10::string_view UPLO) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eigvalsh_typed_handle();
    return handle.call(self, UPLO);
}

// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
at::Tensor linalg_eigvalsh::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO) {
    static auto handle = create_linalg_eigvalsh_typed_handle();
    return handle.redispatch(dispatchKeySet, self, UPLO);
}

// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::linalg_eigvalsh.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_eigvalsh_out::schema> create_linalg_eigvalsh_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_eigvalsh_out::name, linalg_eigvalsh_out::overload_name).typed<linalg_eigvalsh_out::schema>();
}

// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_eigvalsh_out::call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_eigvalsh_out_typed_handle();
    return handle.call(self, UPLO, out);
}

// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_eigvalsh_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
    static auto handle = create_linalg_eigvalsh_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, UPLO, out);
}

// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
// Resolves the typed dispatcher handle for aten::linalg_householder_product.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_householder_product::schema> create_linalg_householder_product_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_householder_product::name, linalg_householder_product::overload_name).typed<linalg_householder_product::schema>();
}

// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
at::Tensor linalg_householder_product::call(const at::Tensor & input, const at::Tensor & tau) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_householder_product_typed_handle();
    return handle.call(input, tau);
}

// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
at::Tensor linalg_householder_product::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau) {
    static auto handle = create_linalg_householder_product_typed_handle();
    return handle.redispatch(dispatchKeySet, input, tau);
}

// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::linalg_householder_product.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_householder_product_out::schema> create_linalg_householder_product_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_householder_product_out::name, linalg_householder_product_out::overload_name).typed<linalg_householder_product_out::schema>();
}

// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_householder_product_out::call(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_householder_product_out_typed_handle();
    return handle.call(input, tau, out);
}

// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_householder_product_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
    static auto handle = create_linalg_householder_product_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, tau, out);
}

// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
// Resolves the typed dispatcher handle for aten::linalg_inv_ex.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv_ex::schema> create_linalg_inv_ex_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_inv_ex::name, linalg_inv_ex::overload_name).typed<linalg_inv_ex::schema>();
}

// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex::call(const at::Tensor & A, bool check_errors) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_inv_ex_typed_handle();
    return handle.call(A, check_errors);
}

// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors) {
    static auto handle = create_linalg_inv_ex_typed_handle();
    return handle.redispatch(dispatchKeySet, A, check_errors);
}

// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
// Resolves the typed dispatcher handle for aten::linalg_inv_ex.inverse.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv_ex_inverse::schema> create_linalg_inv_ex_inverse_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_inv_ex_inverse::name, linalg_inv_ex_inverse::overload_name).typed<linalg_inv_ex_inverse::schema>();
}

// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_inverse::call(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_inv_ex_inverse_typed_handle();
    return handle.call(A, check_errors, inverse, info);
}

// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
    static auto handle = create_linalg_inv_ex_inverse_typed_handle();
    return handle.redispatch(dispatchKeySet, A, check_errors, inverse, info);
}

// aten::linalg_inv(Tensor A) -> Tensor
// Resolves the typed dispatcher handle for aten::linalg_inv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv::schema> create_linalg_inv_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_inv::name, linalg_inv::overload_name).typed<linalg_inv::schema>();
}

// aten::linalg_inv(Tensor A) -> Tensor
at::Tensor linalg_inv::call(const at::Tensor & A) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_inv_typed_handle();
    return handle.call(A);
}

// aten::linalg_inv(Tensor A) -> Tensor
at::Tensor linalg_inv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
    static auto handle = create_linalg_inv_typed_handle();
    return handle.redispatch(dispatchKeySet, A);
}

// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::linalg_inv.out.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_inv_out::schema> create_linalg_inv_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_inv_out::name, linalg_inv_out::overload_name).typed<linalg_inv_out::schema>();
}

// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_inv_out::call(const at::Tensor & A, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_inv_out_typed_handle();
    return handle.call(A, out);
}

// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_inv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
    static auto handle = create_linalg_inv_out_typed_handle();
    return handle.redispatch(dispatchKeySet, A, out);
}

// aten::inverse(Tensor self) -> Tensor
// Resolves the typed dispatcher handle for aten::inverse.
static C10_NOINLINE c10::TypedOperatorHandle<inverse::schema> create_inverse_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(inverse::name, inverse::overload_name).typed<inverse::schema>();
}

// aten::inverse(Tensor self) -> Tensor
at::Tensor inverse::call(const at::Tensor & self) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_inverse_typed_handle();
    return handle.call(self);
}

// aten::inverse(Tensor self) -> Tensor
at::Tensor inverse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_inverse_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::inverse.out.
static C10_NOINLINE c10::TypedOperatorHandle<inverse_out::schema> create_inverse_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(inverse_out::name, inverse_out::overload_name).typed<inverse_out::schema>();
}

// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inverse_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_inverse_out_typed_handle();
    return handle.call(self, out);
}

// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inverse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_inverse_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::inner(Tensor self, Tensor other) -> Tensor
// Resolves the typed dispatcher handle for aten::inner.
static C10_NOINLINE c10::TypedOperatorHandle<inner::schema> create_inner_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(inner::name, inner::overload_name).typed<inner::schema>();
}

// aten::inner(Tensor self, Tensor other) -> Tensor
at::Tensor inner::call(const at::Tensor & self, const at::Tensor & other) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_inner_typed_handle();
    return handle.call(self, other);
}

// aten::inner(Tensor self, Tensor other) -> Tensor
at::Tensor inner::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
    static auto handle = create_inner_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other);
}

// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::inner.out.
static C10_NOINLINE c10::TypedOperatorHandle<inner_out::schema> create_inner_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(inner_out::name, inner_out::overload_name).typed<inner_out::schema>();
}

// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inner_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_inner_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & inner_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_inner_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::outer(Tensor self, Tensor vec2) -> Tensor
// Resolves the typed dispatcher handle for aten::outer.
static C10_NOINLINE c10::TypedOperatorHandle<outer::schema> create_outer_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(outer::name, outer::overload_name).typed<outer::schema>();
}

// aten::outer(Tensor self, Tensor vec2) -> Tensor
at::Tensor outer::call(const at::Tensor & self, const at::Tensor & vec2) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_outer_typed_handle();
    return handle.call(self, vec2);
}

// aten::outer(Tensor self, Tensor vec2) -> Tensor
at::Tensor outer::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
    static auto handle = create_outer_typed_handle();
    return handle.redispatch(dispatchKeySet, self, vec2);
}

// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::outer.out.
static C10_NOINLINE c10::TypedOperatorHandle<outer_out::schema> create_outer_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(outer_out::name, outer_out::overload_name).typed<outer_out::schema>();
}

// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & outer_out::call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_outer_out_typed_handle();
    return handle.call(self, vec2, out);
}

// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & outer_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    static auto handle = create_outer_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, vec2, out);
}

// aten::ger(Tensor self, Tensor vec2) -> Tensor
// Resolves the typed dispatcher handle for aten::ger.
static C10_NOINLINE c10::TypedOperatorHandle<ger::schema> create_ger_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ger::name, ger::overload_name).typed<ger::schema>();
}

// aten::ger(Tensor self, Tensor vec2) -> Tensor
at::Tensor ger::call(const at::Tensor & self, const at::Tensor & vec2) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_ger_typed_handle();
    return handle.call(self, vec2);
}

// aten::ger(Tensor self, Tensor vec2) -> Tensor
at::Tensor ger::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
    static auto handle = create_ger_typed_handle();
    return handle.redispatch(dispatchKeySet, self, vec2);
}

// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed dispatcher handle for aten::ger.out.
static C10_NOINLINE c10::TypedOperatorHandle<ger_out::schema> create_ger_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ger_out::name, ger_out::overload_name).typed<ger_out::schema>();
}

// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ger_out::call(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_ger_out_typed_handle();
    return handle.call(self, vec2, out);
}

// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ger_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
    static auto handle = create_ger_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, vec2, out);
}

// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Resolves the typed dispatcher handle for aten::linalg_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm::schema> create_linalg_norm_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_norm::name, linalg_norm::overload_name).typed<linalg_norm::schema>();
}

// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm::call(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_norm_typed_handle();
    return handle.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    static auto handle = create_linalg_norm_typed_handle();
    return handle.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Resolves the typed dispatcher handle for aten::linalg_norm.ord_str.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm_ord_str::schema> create_linalg_norm_ord_str_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_norm_ord_str::name, linalg_norm_ord_str::overload_name).typed<linalg_norm_ord_str::schema>();
}

// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm_ord_str::call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    // Handle lookup happens once; the static cache serves later calls.
    static auto handle = create_linalg_norm_ord_str_typed_handle();
    return handle.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
at::Tensor linalg_norm_ord_str::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    static auto handle = create_linalg_norm_ord_str_typed_handle();
    return handle.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the out= variant of linalg_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm_out::schema> create_linalg_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_norm_out::name, linalg_norm_out::overload_name)
      .typed<linalg_norm_out::schema>();
}

// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_norm_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_norm_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the string-`ord`, out= variant of linalg_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_norm_ord_str_out::schema> create_linalg_norm_ord_str_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_norm_ord_str_out::name, linalg_norm_ord_str_out::overload_name)
      .typed<linalg_norm_ord_str_out::schema>();
}

// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_norm_ord_str_out::call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_norm_ord_str_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_norm_ord_str_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_norm_ord_str_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Typed-handle creator for linalg_vector_norm (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vector_norm::schema> create_linalg_vector_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vector_norm::name, linalg_vector_norm::overload_name)
      .typed<linalg_vector_norm::schema>();
}

// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_vector_norm::call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_vector_norm_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_vector_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_vector_norm_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the out= variant of linalg_vector_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_vector_norm_out::schema> create_linalg_vector_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_vector_norm_out::name, linalg_vector_norm_out::overload_name)
      .typed<linalg_vector_norm_out::schema>();
}

// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_vector_norm_out::call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_vector_norm_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_vector_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_vector_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Typed-handle creator for the Scalar-`ord` overload of linalg_matrix_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm::schema> create_linalg_matrix_norm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm::name, linalg_matrix_norm::overload_name)
      .typed<linalg_matrix_norm::schema>();
}

// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_matrix_norm::call(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_matrix_norm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the out= variant of linalg_matrix_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm_out::schema> create_linalg_matrix_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm_out::name, linalg_matrix_norm_out::overload_name)
      .typed<linalg_matrix_norm_out::schema>();
}

// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_matrix_norm_out::call(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_matrix_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Typed-handle creator for the string-`ord` overload of linalg_matrix_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm_str_ord::schema> create_linalg_matrix_norm_str_ord_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm_str_ord::name, linalg_matrix_norm_str_ord::overload_name)
      .typed<linalg_matrix_norm_str_ord::schema>();
}

// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_matrix_norm_str_ord::call(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_str_ord_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_matrix_norm_str_ord::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
    
    static auto op = create_linalg_matrix_norm_str_ord_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
}

// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the string-`ord`, out= variant of linalg_matrix_norm.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_norm_str_ord_out::schema> create_linalg_matrix_norm_str_ord_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_matrix_norm_str_ord_out::name, linalg_matrix_norm_str_ord_out::overload_name)
      .typed<linalg_matrix_norm_str_ord_out::schema>();
}

// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_matrix_norm_str_ord_out::call(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_str_ord_out_typed_handle();
    return op.call(self, ord, dim, keepdim, dtype, out);
}

// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_matrix_norm_str_ord_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_linalg_matrix_norm_str_ord_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
}

// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
// Typed-handle creator for the internal _linalg_svd op (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_svd::schema> create__linalg_svd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_svd::name, _linalg_svd::overload_name)
      .typed<_linalg_svd::schema>();
}

// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
// Entry point; returns the (U, S, Vh) tuple per the schema.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd::call(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
    
    static auto op = create__linalg_svd_typed_handle();
    return op.call(A, full_matrices, compute_uv, driver);
}

// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
    
    static auto op = create__linalg_svd_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver);
}

// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
// Typed-handle creator for the out= (U/S/Vh) variant of _linalg_svd.
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_svd_U::schema> create__linalg_svd_U_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_linalg_svd_U::name, _linalg_svd_U::overload_name)
      .typed<_linalg_svd_U::schema>();
}

// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
// Entry point; returns references to the caller-provided U, S, Vh tensors.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_U::call(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    
    static auto op = create__linalg_svd_U_typed_handle();
    return op.call(A, full_matrices, compute_uv, driver, U, S, Vh);
}

// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_U::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    
    static auto op = create__linalg_svd_U_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver, U, S, Vh);
}

// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
// Typed-handle creator for the public linalg_svd op (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svd::schema> create_linalg_svd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svd::name, linalg_svd::overload_name)
      .typed<linalg_svd::schema>();
}

// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
// Entry point; returns the (U, S, Vh) tuple per the schema.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd::call(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_svd_typed_handle();
    return op.call(A, full_matrices, driver);
}

// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_svd_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, driver);
}

// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
// Typed-handle creator for the out= (U/S/Vh) variant of linalg_svd.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svd_U::schema> create_linalg_svd_U_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svd_U::name, linalg_svd_U::overload_name)
      .typed<linalg_svd_U::schema>();
}

// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
// Entry point; returns references to the caller-provided U, S, Vh tensors.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_U::call(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    
    static auto op = create_linalg_svd_U_typed_handle();
    return op.call(A, full_matrices, driver, U, S, Vh);
}

// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_U::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
    
    static auto op = create_linalg_svd_U_typed_handle();
    return op.redispatch(dispatchKeySet, A, full_matrices, driver, U, S, Vh);
}

// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
// Typed-handle creator for linalg_svdvals (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svdvals::schema> create_linalg_svdvals_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svdvals::name, linalg_svdvals::overload_name)
      .typed<linalg_svdvals::schema>();
}

// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_svdvals::call(const at::Tensor & A, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_svdvals_typed_handle();
    return op.call(A, driver);
}

// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_svdvals::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, ::std::optional<c10::string_view> driver) {
    
    static auto op = create_linalg_svdvals_typed_handle();
    return op.redispatch(dispatchKeySet, A, driver);
}

// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the out= variant of linalg_svdvals.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_svdvals_out::schema> create_linalg_svdvals_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_svdvals_out::name, linalg_svdvals_out::overload_name)
      .typed<linalg_svdvals_out::schema>();
}

// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_svdvals_out::call(const at::Tensor & A, ::std::optional<c10::string_view> driver, at::Tensor & out) {
    
    static auto op = create_linalg_svdvals_out_typed_handle();
    return op.call(A, driver, out);
}

// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_svdvals_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, ::std::optional<c10::string_view> driver, at::Tensor & out) {
    
    static auto op = create_linalg_svdvals_out_typed_handle();
    return op.redispatch(dispatchKeySet, A, driver, out);
}

// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
// Typed-handle creator for the Scalar-`p` overload of linalg_cond.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond::schema> create_linalg_cond_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond::name, linalg_cond::overload_name)
      .typed<linalg_cond::schema>();
}

// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_cond::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p) {
    
    static auto op = create_linalg_cond_typed_handle();
    return op.call(self, p);
}

// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_cond::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p) {
    
    static auto op = create_linalg_cond_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the out= variant of linalg_cond.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond_out::schema> create_linalg_cond_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond_out::name, linalg_cond_out::overload_name)
      .typed<linalg_cond_out::schema>();
}

// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_cond_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out) {
    
    static auto op = create_linalg_cond_out_typed_handle();
    return op.call(self, p, out);
}

// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_cond_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out) {
    
    static auto op = create_linalg_cond_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
// Typed-handle creator for the string-`p` overload of linalg_cond.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond_p_str::schema> create_linalg_cond_p_str_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond_p_str::name, linalg_cond_p_str::overload_name)
      .typed<linalg_cond_p_str::schema>();
}

// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_cond_p_str::call(const at::Tensor & self, c10::string_view p) {
    
    static auto op = create_linalg_cond_p_str_typed_handle();
    return op.call(self, p);
}

// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_cond_p_str::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p) {
    
    static auto op = create_linalg_cond_p_str_typed_handle();
    return op.redispatch(dispatchKeySet, self, p);
}

// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the string-`p`, out= variant of linalg_cond.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_cond_p_str_out::schema> create_linalg_cond_p_str_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_cond_p_str_out::name, linalg_cond_p_str_out::overload_name)
      .typed<linalg_cond_p_str_out::schema>();
}

// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_cond_p_str_out::call(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
    
    static auto op = create_linalg_cond_p_str_out_typed_handle();
    return op.call(self, p, out);
}

// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_cond_p_str_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out) {
    
    static auto op = create_linalg_cond_p_str_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
// Typed-handle creator for the tensor atol/rtol overload of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_tensor::schema> create_linalg_pinv_atol_rtol_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_tensor::name, linalg_pinv_atol_rtol_tensor::overload_name)
      .typed<linalg_pinv_atol_rtol_tensor::schema>();
}

// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_pinv_atol_rtol_tensor::call(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
    
    static auto op = create_linalg_pinv_atol_rtol_tensor_typed_handle();
    return op.call(self, atol, rtol, hermitian);
}

// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_pinv_atol_rtol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
    
    static auto op = create_linalg_pinv_atol_rtol_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian);
}

// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the tensor atol/rtol, out= variant of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_tensor_out::schema> create_linalg_pinv_atol_rtol_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_tensor_out::name, linalg_pinv_atol_rtol_tensor_out::overload_name)
      .typed<linalg_pinv_atol_rtol_tensor_out::schema>();
}

// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_pinv_atol_rtol_tensor_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_pinv_atol_rtol_tensor_out_typed_handle();
    return op.call(self, atol, rtol, hermitian, out);
}

// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_pinv_atol_rtol_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_pinv_atol_rtol_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
}

// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
// Typed-handle creator for the float atol/rtol overload of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_float::schema> create_linalg_pinv_atol_rtol_float_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_float::name, linalg_pinv_atol_rtol_float::overload_name)
      .typed<linalg_pinv_atol_rtol_float::schema>();
}

// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_pinv_atol_rtol_float::call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
    
    static auto op = create_linalg_pinv_atol_rtol_float_typed_handle();
    return op.call(self, atol, rtol, hermitian);
}

// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_pinv_atol_rtol_float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
    
    static auto op = create_linalg_pinv_atol_rtol_float_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian);
}

// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the float atol/rtol, out= variant of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_atol_rtol_float_out::schema> create_linalg_pinv_atol_rtol_float_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_atol_rtol_float_out::name, linalg_pinv_atol_rtol_float_out::overload_name)
      .typed<linalg_pinv_atol_rtol_float_out::schema>();
}

// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_pinv_atol_rtol_float_out::call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_pinv_atol_rtol_float_out_typed_handle();
    return op.call(self, atol, rtol, hermitian, out);
}

// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_pinv_atol_rtol_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_pinv_atol_rtol_float_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
}

// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
// Typed-handle creator for the float-`rcond` overload of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv::schema> create_linalg_pinv_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv::name, linalg_pinv::overload_name)
      .typed<linalg_pinv::schema>();
}

// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_pinv::call(const at::Tensor & self, double rcond, bool hermitian) {
    
    static auto op = create_linalg_pinv_typed_handle();
    return op.call(self, rcond, hermitian);
}

// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_pinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian) {
    
    static auto op = create_linalg_pinv_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian);
}

// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
// Typed-handle creator for the tensor-`rcond` overload of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_rcond_tensor::schema> create_linalg_pinv_rcond_tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_rcond_tensor::name, linalg_pinv_rcond_tensor::overload_name)
      .typed<linalg_pinv_rcond_tensor::schema>();
}

// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
// Entry point: the handle is created once (thread-safe static init) and reused.
at::Tensor linalg_pinv_rcond_tensor::call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
    
    static auto op = create_linalg_pinv_rcond_tensor_typed_handle();
    return op.call(self, rcond, hermitian);
}

// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor linalg_pinv_rcond_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
    
    static auto op = create_linalg_pinv_rcond_tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian);
}

// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
// Typed-handle creator for the float-`rcond`, out= variant of linalg_pinv.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_out::schema> create_linalg_pinv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linalg_pinv_out::name, linalg_pinv_out::overload_name)
      .typed<linalg_pinv_out::schema>();
}

// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
// Entry point; returns the (aliased) `out` tensor reference per the schema.
at::Tensor & linalg_pinv_out::call(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_pinv_out_typed_handle();
    return op.call(self, rcond, hermitian, out);
}

// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), but forwards the caller-supplied DispatchKeySet verbatim.
at::Tensor & linalg_pinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
    
    static auto op = create_linalg_pinv_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, rcond, hermitian, out);
}

// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_pinv_out_rcond_tensor::schema> create_linalg_pinv_out_rcond_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_pinv_out_rcond_tensor::name, linalg_pinv_out_rcond_tensor::overload_name)
      .typed<linalg_pinv_out_rcond_tensor::schema>();
}

// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_out_rcond_tensor::call(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_pinv_out_rcond_tensor_typed_handle();
    return handle.call(self, rcond, hermitian, out);
}

// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_pinv_out_rcond_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_pinv_out_rcond_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, rcond, hermitian, out);
}

// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_solve_ex::schema> create__linalg_solve_ex_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_linalg_solve_ex::name, _linalg_solve_ex::overload_name)
      .typed<_linalg_solve_ex::schema>();
}

// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create__linalg_solve_ex_typed_handle();
    return handle.call(A, B, left, check_errors);
}

// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create__linalg_solve_ex_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left, check_errors);
}

// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
static C10_NOINLINE c10::TypedOperatorHandle<_linalg_solve_ex_result::schema> create__linalg_solve_ex_result_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_linalg_solve_ex_result::name, _linalg_solve_ex_result::overload_name)
      .typed<_linalg_solve_ex_result::schema>();
}

// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_result::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create__linalg_solve_ex_result_typed_handle();
    return handle.call(A, B, left, check_errors, result, LU, pivots, info);
}

// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_result::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create__linalg_solve_ex_result_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left, check_errors, result, LU, pivots, info);
}

// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_ex::schema> create_linalg_solve_ex_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_solve_ex::name, linalg_solve_ex::overload_name)
      .typed<linalg_solve_ex::schema>();
}

// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_ex_typed_handle();
    return handle.call(A, B, left, check_errors);
}

// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_ex_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left, check_errors);
}

// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_ex_out::schema> create_linalg_solve_ex_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_solve_ex_out::name, linalg_solve_ex_out::overload_name)
      .typed<linalg_solve_ex_out::schema>();
}

// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out::call(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_ex_out_typed_handle();
    return handle.call(A, B, left, check_errors, result, info);
}

// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_ex_out_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left, check_errors, result, info);
}

// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve::schema> create_linalg_solve_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_solve::name, linalg_solve::overload_name)
      .typed<linalg_solve::schema>();
}

// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
at::Tensor linalg_solve::call(const at::Tensor & A, const at::Tensor & B, bool left) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_typed_handle();
    return handle.call(A, B, left);
}

// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
at::Tensor linalg_solve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left);
}

// aten::_spsolve(Tensor A, Tensor B, *, bool left=True) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_spsolve::schema> create__spsolve_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_spsolve::name, _spsolve::overload_name)
      .typed<_spsolve::schema>();
}

// aten::_spsolve(Tensor A, Tensor B, *, bool left=True) -> Tensor
at::Tensor _spsolve::call(const at::Tensor & A, const at::Tensor & B, bool left) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create__spsolve_typed_handle();
    return handle.call(A, B, left);
}

// aten::_spsolve(Tensor A, Tensor B, *, bool left=True) -> Tensor
at::Tensor _spsolve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create__spsolve_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left);
}

// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_solve_out::schema> create_linalg_solve_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_solve_out::name, linalg_solve_out::overload_name)
      .typed<linalg_solve_out::schema>();
}

// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_out::call(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_out_typed_handle();
    return handle.call(A, B, left, out);
}

// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_solve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_solve_out_typed_handle();
    return handle.redispatch(dispatchKeySet, A, B, left, out);
}

// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorinv::schema> create_linalg_tensorinv_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_tensorinv::name, linalg_tensorinv::overload_name)
      .typed<linalg_tensorinv::schema>();
}

// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
at::Tensor linalg_tensorinv::call(const at::Tensor & self, int64_t ind) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorinv_typed_handle();
    return handle.call(self, ind);
}

// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
at::Tensor linalg_tensorinv::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorinv_typed_handle();
    return handle.redispatch(dispatchKeySet, self, ind);
}

// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorinv_out::schema> create_linalg_tensorinv_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_tensorinv_out::name, linalg_tensorinv_out::overload_name)
      .typed<linalg_tensorinv_out::schema>();
}

// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorinv_out::call(const at::Tensor & self, int64_t ind, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorinv_out_typed_handle();
    return handle.call(self, ind, out);
}

// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorinv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorinv_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, ind, out);
}

// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorsolve::schema> create_linalg_tensorsolve_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_tensorsolve::name, linalg_tensorsolve::overload_name)
      .typed<linalg_tensorsolve::schema>();
}

// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
at::Tensor linalg_tensorsolve::call(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorsolve_typed_handle();
    return handle.call(self, other, dims);
}

// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
at::Tensor linalg_tensorsolve::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorsolve_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, dims);
}

// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_tensorsolve_out::schema> create_linalg_tensorsolve_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_tensorsolve_out::name, linalg_tensorsolve_out::overload_name)
      .typed<linalg_tensorsolve_out::schema>();
}

// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorsolve_out::call(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorsolve_out_typed_handle();
    return handle.call(self, other, dims, out);
}

// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_tensorsolve_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_tensorsolve_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, dims, out);
}

// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_qr::schema> create_linalg_qr_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_qr::name, linalg_qr::overload_name)
      .typed<linalg_qr::schema>();
}

// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> linalg_qr::call(const at::Tensor & A, c10::string_view mode) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_qr_typed_handle();
    return handle.call(A, mode);
}

// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
::std::tuple<at::Tensor,at::Tensor> linalg_qr::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_qr_typed_handle();
    return handle.redispatch(dispatchKeySet, A, mode);
}

// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_qr_out::schema> create_linalg_qr_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_qr_out::name, linalg_qr_out::overload_name)
      .typed<linalg_qr_out::schema>();
}

// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out::call(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_qr_out_typed_handle();
    return handle.call(A, mode, Q, R);
}

// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_qr_out_typed_handle();
    return handle.redispatch(dispatchKeySet, A, mode, Q, R);
}

// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_power::schema> create_linalg_matrix_power_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_power::name, linalg_matrix_power::overload_name)
      .typed<linalg_matrix_power::schema>();
}

// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
at::Tensor linalg_matrix_power::call(const at::Tensor & self, int64_t n) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_power_typed_handle();
    return handle.call(self, n);
}

// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
at::Tensor linalg_matrix_power::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_power_typed_handle();
    return handle.redispatch(dispatchKeySet, self, n);
}

// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_power_out::schema> create_linalg_matrix_power_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_power_out::name, linalg_matrix_power_out::overload_name)
      .typed<linalg_matrix_power_out::schema>();
}

// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_power_out::call(const at::Tensor & self, int64_t n, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_power_out_typed_handle();
    return handle.call(self, n, out);
}

// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_power_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_power_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, n, out);
}

// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_tensor::schema> create_linalg_matrix_rank_atol_rtol_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_tensor::name, linalg_matrix_rank_atol_rtol_tensor::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_tensor::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_tensor::call(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_tensor_typed_handle();
    return handle.call(input, atol, rtol, hermitian);
}

// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, input, atol, rtol, hermitian);
}

// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_tensor_out::schema> create_linalg_matrix_rank_atol_rtol_tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_tensor_out::name, linalg_matrix_rank_atol_rtol_tensor_out::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_tensor_out::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_tensor_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_tensor_out_typed_handle();
    return handle.call(input, atol, rtol, hermitian, out);
}

// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, atol, rtol, hermitian, out);
}

// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_float::schema> create_linalg_matrix_rank_atol_rtol_float_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_float::name, linalg_matrix_rank_atol_rtol_float::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_float::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_float::call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_float_typed_handle();
    return handle.call(self, atol, rtol, hermitian);
}

// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_atol_rtol_float::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_float_typed_handle();
    return handle.redispatch(dispatchKeySet, self, atol, rtol, hermitian);
}

// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_atol_rtol_float_out::schema> create_linalg_matrix_rank_atol_rtol_float_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_atol_rtol_float_out::name, linalg_matrix_rank_atol_rtol_float_out::overload_name)
      .typed<linalg_matrix_rank_atol_rtol_float_out::schema>();
}

// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_float_out::call(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_float_out_typed_handle();
    return handle.call(self, atol, rtol, hermitian, out);
}

// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_atol_rtol_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_atol_rtol_float_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
}

// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank::schema> create_linalg_matrix_rank_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank::name, linalg_matrix_rank::overload_name)
      .typed<linalg_matrix_rank::schema>();
}

// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank::call(const at::Tensor & self, double tol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_typed_handle();
    return handle.call(self, tol, hermitian);
}

// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_typed_handle();
    return handle.redispatch(dispatchKeySet, self, tol, hermitian);
}

// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_out::schema> create_linalg_matrix_rank_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_out::name, linalg_matrix_rank_out::overload_name)
      .typed<linalg_matrix_rank_out::schema>();
}

// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out::call(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_out_typed_handle();
    return handle.call(self, tol, hermitian, out);
}

// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, tol, hermitian, out);
}

// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_tol_tensor::schema> create_linalg_matrix_rank_tol_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_tol_tensor::name, linalg_matrix_rank_tol_tensor::overload_name)
      .typed<linalg_matrix_rank_tol_tensor::schema>();
}

// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_tol_tensor::call(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_tol_tensor_typed_handle();
    return handle.call(input, tol, hermitian);
}

// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
at::Tensor linalg_matrix_rank_tol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_tol_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, input, tol, hermitian);
}

// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_rank_out_tol_tensor::schema> create_linalg_matrix_rank_out_tol_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_matrix_rank_out_tol_tensor::name, linalg_matrix_rank_out_tol_tensor::overload_name)
      .typed<linalg_matrix_rank_out_tol_tensor::schema>();
}

// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out_tol_tensor::call(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_out_tol_tensor_typed_handle();
    return handle.call(input, tol, hermitian, out);
}

// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_rank_out_tol_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_matrix_rank_out_tol_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, input, tol, hermitian, out);
}

// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<linalg_multi_dot::schema> create_linalg_multi_dot_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_multi_dot::name, linalg_multi_dot::overload_name)
      .typed<linalg_multi_dot::schema>();
}

// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
at::Tensor linalg_multi_dot::call(at::TensorList tensors) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_multi_dot_typed_handle();
    return handle.call(tensors);
}

// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
at::Tensor linalg_multi_dot::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_multi_dot_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors);
}

// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<linalg_multi_dot_out::schema> create_linalg_multi_dot_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(linalg_multi_dot_out::name, linalg_multi_dot_out::overload_name)
      .typed<linalg_multi_dot_out::schema>();
}

// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_multi_dot_out::call(at::TensorList tensors, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_multi_dot_out_typed_handle();
    return handle.call(tensors, out);
}

// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_multi_dot_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_linalg_multi_dot_out_typed_handle();
    return handle.redispatch(dispatchKeySet, tensors, out);
}

// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<nested_to_padded_tensor::schema> create_nested_to_padded_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(nested_to_padded_tensor::name, nested_to_padded_tensor::overload_name)
      .typed<nested_to_padded_tensor::schema>();
}

// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
at::Tensor nested_to_padded_tensor::call(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_nested_to_padded_tensor_typed_handle();
    return handle.call(self, padding, output_size);
}

// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
at::Tensor nested_to_padded_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
    // Handle is resolved once; the function-local static caches it across calls.
    static const auto handle = create_nested_to_padded_tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, padding, output_size);
}

// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_serialization_subcmul::schema> create__test_serialization_subcmul_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_serialization_subcmul::name, _test_serialization_subcmul::overload_name)
      .typed<_test_serialization_subcmul::schema>();
}

// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
at::Tensor _test_serialization_subcmul::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_serialization_subcmul_typed_handle();
    return op.call(self, other, alpha);
}

// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
at::Tensor _test_serialization_subcmul::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_serialization_subcmul_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha);
}

// aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_parallel_materialize::schema> create__test_parallel_materialize_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_parallel_materialize::name, _test_parallel_materialize::overload_name)
      .typed<_test_parallel_materialize::schema>();
}

// aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor
at::Tensor _test_parallel_materialize::call(const at::Tensor & self, int64_t num_parallel, bool skip_first) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_parallel_materialize_typed_handle();
    return op.call(self, num_parallel, skip_first);
}

// aten::_test_parallel_materialize(Tensor self, int num_parallel, bool skip_first=False) -> Tensor
at::Tensor _test_parallel_materialize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_parallel, bool skip_first) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_parallel_materialize_typed_handle();
    return op.redispatch(dispatchKeySet, self, num_parallel, skip_first);
}

// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_intlist::schema> create__test_optional_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_intlist::name, _test_optional_intlist::overload_name)
      .typed<_test_optional_intlist::schema>();
}

// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
at::Tensor _test_optional_intlist::call(const at::Tensor & values, at::OptionalIntArrayRef addends) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_optional_intlist_typed_handle();
    return op.call(values, addends);
}

// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
at::Tensor _test_optional_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_optional_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends);
}

// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_filled_intlist::schema> create__test_optional_filled_intlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_filled_intlist::name, _test_optional_filled_intlist::overload_name)
      .typed<_test_optional_filled_intlist::schema>();
}

// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
at::Tensor _test_optional_filled_intlist::call(const at::Tensor & values, at::OptionalIntArrayRef addends) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_optional_filled_intlist_typed_handle();
    return op.call(values, addends);
}

// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
at::Tensor _test_optional_filled_intlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_optional_filled_intlist_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends);
}

// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_floatlist::schema> create__test_optional_floatlist_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_optional_floatlist::name, _test_optional_floatlist::overload_name)
      .typed<_test_optional_floatlist::schema>();
}

// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
at::Tensor _test_optional_floatlist::call(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_optional_floatlist_typed_handle();
    return op.call(values, addends);
}

// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
at::Tensor _test_optional_floatlist::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_optional_floatlist_typed_handle();
    return op.redispatch(dispatchKeySet, values, addends);
}

// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_string_default::schema> create__test_string_default_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_string_default::name, _test_string_default::overload_name)
      .typed<_test_string_default::schema>();
}

// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
at::Tensor _test_string_default::call(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_string_default_typed_handle();
    return op.call(dummy, a, b);
}

// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
at::Tensor _test_string_default::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_string_default_typed_handle();
    return op.redispatch(dispatchKeySet, dummy, a, b);
}

// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_ambiguous_defaults_a::schema> create__test_ambiguous_defaults_a_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_ambiguous_defaults_a::name, _test_ambiguous_defaults_a::overload_name)
      .typed<_test_ambiguous_defaults_a::schema>();
}

// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
at::Tensor _test_ambiguous_defaults_a::call(const at::Tensor & dummy, int64_t a, int64_t b) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_ambiguous_defaults_a_typed_handle();
    return op.call(dummy, a, b);
}

// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
at::Tensor _test_ambiguous_defaults_a::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, int64_t b) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_ambiguous_defaults_a_typed_handle();
    return op.redispatch(dispatchKeySet, dummy, a, b);
}

// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_ambiguous_defaults_b::schema> create__test_ambiguous_defaults_b_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_ambiguous_defaults_b::name, _test_ambiguous_defaults_b::overload_name)
      .typed<_test_ambiguous_defaults_b::schema>();
}

// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
at::Tensor _test_ambiguous_defaults_b::call(const at::Tensor & dummy, int64_t a, c10::string_view b) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_ambiguous_defaults_b_typed_handle();
    return op.call(dummy, a, b);
}

// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
at::Tensor _test_ambiguous_defaults_b::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, c10::string_view b) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_ambiguous_defaults_b_typed_handle();
    return op.redispatch(dispatchKeySet, dummy, a, b);
}

// aten::_test_warn_in_autograd(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_warn_in_autograd::schema> create__test_warn_in_autograd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_warn_in_autograd::name, _test_warn_in_autograd::overload_name)
      .typed<_test_warn_in_autograd::schema>();
}

// aten::_test_warn_in_autograd(Tensor self) -> Tensor
at::Tensor _test_warn_in_autograd::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_warn_in_autograd_typed_handle();
    return op.call(self);
}

// aten::_test_warn_in_autograd(Tensor self) -> Tensor
at::Tensor _test_warn_in_autograd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_warn_in_autograd_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_fullcoverage::schema> create__test_autograd_multiple_dispatch_fullcoverage_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_fullcoverage::name, _test_autograd_multiple_dispatch_fullcoverage::overload_name)
      .typed<_test_autograd_multiple_dispatch_fullcoverage::schema>();
}

// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_fullcoverage::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_autograd_multiple_dispatch_fullcoverage_typed_handle();
    return op.call(self);
}

// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_fullcoverage::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_autograd_multiple_dispatch_fullcoverage_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_ntonly::schema> create__test_autograd_multiple_dispatch_ntonly_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_ntonly::name, _test_autograd_multiple_dispatch_ntonly::overload_name)
      .typed<_test_autograd_multiple_dispatch_ntonly::schema>();
}

// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_ntonly::call(const at::Tensor & self, bool b) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_autograd_multiple_dispatch_ntonly_typed_handle();
    return op.call(self, b);
}

// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_ntonly::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool b) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_autograd_multiple_dispatch_ntonly_typed_handle();
    return op.redispatch(dispatchKeySet, self, b);
}

// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_view::schema> create__test_autograd_multiple_dispatch_view_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_view::name, _test_autograd_multiple_dispatch_view::overload_name)
      .typed<_test_autograd_multiple_dispatch_view::schema>();
}

// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
at::Tensor _test_autograd_multiple_dispatch_view::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_autograd_multiple_dispatch_view_typed_handle();
    return op.call(self);
}

// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
at::Tensor _test_autograd_multiple_dispatch_view::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_autograd_multiple_dispatch_view_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_view_copy::schema> create__test_autograd_multiple_dispatch_view_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_autograd_multiple_dispatch_view_copy::name, _test_autograd_multiple_dispatch_view_copy::overload_name)
      .typed<_test_autograd_multiple_dispatch_view_copy::schema>();
}

// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_view_copy::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__test_autograd_multiple_dispatch_view_copy_typed_handle();
    return op.call(self);
}

// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
at::Tensor _test_autograd_multiple_dispatch_view_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__test_autograd_multiple_dispatch_view_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<segment_reduce::schema> create_segment_reduce_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(segment_reduce::name, segment_reduce::overload_name)
      .typed<segment_reduce::schema>();
}

// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
at::Tensor segment_reduce::call(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_segment_reduce_typed_handle();
    return op.call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
}

// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
at::Tensor segment_reduce::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_segment_reduce_typed_handle();
    return op.redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial);
}

// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_segment_reduce_backward::schema> create__segment_reduce_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_segment_reduce_backward::name, _segment_reduce_backward::overload_name)
      .typed<_segment_reduce_backward::schema>();
}

// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
at::Tensor _segment_reduce_backward::call(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & offsets, int64_t axis, const ::std::optional<at::Scalar> & initial) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__segment_reduce_backward_typed_handle();
    return op.call(grad, output, data, reduce, lengths, offsets, axis, initial);
}

// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
at::Tensor _segment_reduce_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & offsets, int64_t axis, const ::std::optional<at::Scalar> & initial) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__segment_reduce_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial);
}

// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side="right") -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<pad_sequence::schema> create_pad_sequence_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(pad_sequence::name, pad_sequence::overload_name)
      .typed<pad_sequence::schema>();
}

// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side="right") -> Tensor
at::Tensor pad_sequence::call(at::TensorList sequences, bool batch_first, double padding_value, c10::string_view padding_side) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_pad_sequence_typed_handle();
    return op.call(sequences, batch_first, padding_value, padding_side);
}

// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side="right") -> Tensor
at::Tensor pad_sequence::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList sequences, bool batch_first, double padding_value, c10::string_view padding_side) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_pad_sequence_typed_handle();
    return op.redispatch(dispatchKeySet, sequences, batch_first, padding_value, padding_side);
}

// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<flatten_dense_tensors::schema> create_flatten_dense_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flatten_dense_tensors::name, flatten_dense_tensors::overload_name)
      .typed<flatten_dense_tensors::schema>();
}

// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
at::Tensor flatten_dense_tensors::call(at::TensorList tensors) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_flatten_dense_tensors_typed_handle();
    return op.call(tensors);
}

// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
at::Tensor flatten_dense_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_flatten_dense_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, tensors);
}

// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unflatten_dense_tensors::schema> create_unflatten_dense_tensors_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unflatten_dense_tensors::name, unflatten_dense_tensors::overload_name)
      .typed<unflatten_dense_tensors::schema>();
}

// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> unflatten_dense_tensors::call(const at::Tensor & flat, at::TensorList tensors) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_unflatten_dense_tensors_typed_handle();
    return op.call(flat, tensors);
}

// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
::std::vector<at::Tensor> unflatten_dense_tensors::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & flat, at::TensorList tensors) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_unflatten_dense_tensors_typed_handle();
    return op.redispatch(dispatchKeySet, flat, tensors);
}

// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_tensor_list::schema> create__nested_tensor_from_tensor_list_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_from_tensor_list::name, _nested_tensor_from_tensor_list::overload_name)
      .typed<_nested_tensor_from_tensor_list::schema>();
}

// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _nested_tensor_from_tensor_list::call(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__nested_tensor_from_tensor_list_typed_handle();
    return op.call(list, dtype, layout, device, pin_memory);
}

// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
at::Tensor _nested_tensor_from_tensor_list::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__nested_tensor_from_tensor_list_typed_handle();
    return op.redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory);
}

// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fw_primal_copy::schema> create__fw_primal_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fw_primal_copy::name, _fw_primal_copy::overload_name)
      .typed<_fw_primal_copy::schema>();
}

// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
at::Tensor _fw_primal_copy::call(const at::Tensor & self, int64_t level) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__fw_primal_copy_typed_handle();
    return op.call(self, level);
}

// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
at::Tensor _fw_primal_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__fw_primal_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, level);
}

// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_make_dual_copy::schema> create__make_dual_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_dual_copy::name, _make_dual_copy::overload_name)
      .typed<_make_dual_copy::schema>();
}

// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
at::Tensor _make_dual_copy::call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__make_dual_copy_typed_handle();
    return op.call(primal, tangent, level);
}

// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
at::Tensor _make_dual_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__make_dual_copy_typed_handle();
    return op.redispatch(dispatchKeySet, primal, tangent, level);
}

// aten::view_as_real_copy(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<view_as_real_copy::schema> create_view_as_real_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_real_copy::name, view_as_real_copy::overload_name)
      .typed<view_as_real_copy::schema>();
}

// aten::view_as_real_copy(Tensor self) -> Tensor
at::Tensor view_as_real_copy::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_view_as_real_copy_typed_handle();
    return op.call(self);
}

// aten::view_as_real_copy(Tensor self) -> Tensor
at::Tensor view_as_real_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_view_as_real_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::view_as_complex_copy(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<view_as_complex_copy::schema> create_view_as_complex_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_complex_copy::name, view_as_complex_copy::overload_name)
      .typed<view_as_complex_copy::schema>();
}

// aten::view_as_complex_copy(Tensor self) -> Tensor
at::Tensor view_as_complex_copy::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_view_as_complex_copy_typed_handle();
    return op.call(self);
}

// aten::view_as_complex_copy(Tensor self) -> Tensor
at::Tensor view_as_complex_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_view_as_complex_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_conj_copy(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_conj_copy::schema> create__conj_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_copy::name, _conj_copy::overload_name)
      .typed<_conj_copy::schema>();
}

// aten::_conj_copy(Tensor self) -> Tensor
at::Tensor _conj_copy::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__conj_copy_typed_handle();
    return op.call(self);
}

// aten::_conj_copy(Tensor self) -> Tensor
at::Tensor _conj_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__conj_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_neg_view_copy(Tensor self) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_neg_view_copy::schema> create__neg_view_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_neg_view_copy::name, _neg_view_copy::overload_name)
      .typed<_neg_view_copy::schema>();
}

// aten::_neg_view_copy(Tensor self) -> Tensor
at::Tensor _neg_view_copy::call(const at::Tensor & self) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__neg_view_copy_typed_handle();
    return op.call(self);
}

// aten::_neg_view_copy(Tensor self) -> Tensor
at::Tensor _neg_view_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__neg_view_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_copy::schema> create_as_strided_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_copy::name, as_strided_copy::overload_name)
      .typed<as_strided_copy::schema>();
}

// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_copy::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_as_strided_copy_typed_handle();
    return op.call(self, size, stride, storage_offset);
}

// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
at::Tensor as_strided_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_as_strided_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, storage_offset);
}

// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_broadcast_to_copy::schema> create__sparse_broadcast_to_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_broadcast_to_copy::name, _sparse_broadcast_to_copy::overload_name)
      .typed<_sparse_broadcast_to_copy::schema>();
}

// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
at::Tensor _sparse_broadcast_to_copy::call(const at::Tensor & self, at::IntArrayRef size) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create__sparse_broadcast_to_copy_typed_handle();
    return op.call(self, size);
}

// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
at::Tensor _sparse_broadcast_to_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create__sparse_broadcast_to_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size);
}

// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_copy::schema> create_diagonal_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_copy::name, diagonal_copy::overload_name)
      .typed<diagonal_copy::schema>();
}

// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_copy::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_diagonal_copy_typed_handle();
    return op.call(self, offset, dim1, dim2);
}

// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
at::Tensor diagonal_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_diagonal_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, offset, dim1, dim2);
}

// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<expand_copy::schema> create_expand_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand_copy::name, expand_copy::overload_name)
      .typed<expand_copy::schema>();
}

// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
at::Tensor expand_copy::call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_expand_copy_typed_handle();
    return op.call(self, size, implicit);
}

// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
at::Tensor expand_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_expand_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, implicit);
}

// aten::permute_copy(Tensor self, int[] dims) -> Tensor
// One-time dispatcher schema lookup; findSchemaOrThrow aborts if the operator is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<permute_copy::schema> create_permute_copy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(permute_copy::name, permute_copy::overload_name)
      .typed<permute_copy::schema>();
}

// aten::permute_copy(Tensor self, int[] dims) -> Tensor
at::Tensor permute_copy::call(const at::Tensor & self, at::IntArrayRef dims) {
    // Handle is resolved on first call and cached in a function-local static.
    static auto op = create_permute_copy_typed_handle();
    return op.call(self, dims);
}

// aten::permute_copy(Tensor self, int[] dims) -> Tensor
at::Tensor permute_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet.
    static auto op = create_permute_copy_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims);
}

// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_alias_copy::schema> create__reshape_alias_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_reshape_alias_copy::name, _reshape_alias_copy::overload_name).typed<_reshape_alias_copy::schema>();
}

// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
at::Tensor _reshape_alias_copy::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__reshape_alias_copy_typed_handle();
    return typed_handle.call(self, size, stride);
}

// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
at::Tensor _reshape_alias_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__reshape_alias_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, size, stride);
}

// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<select_copy_int::schema> create_select_copy_int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(select_copy_int::name, select_copy_int::overload_name).typed<select_copy_int::schema>();
}

// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
at::Tensor select_copy_int::call(const at::Tensor & self, int64_t dim, c10::SymInt index) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_select_copy_int_typed_handle();
    return typed_handle.call(self, dim, index);
}

// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
at::Tensor select_copy_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_select_copy_int_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, index);
}

// aten::detach_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<detach_copy::schema> create_detach_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(detach_copy::name, detach_copy::overload_name).typed<detach_copy::schema>();
}

// aten::detach_copy(Tensor self) -> Tensor
at::Tensor detach_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_detach_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::detach_copy(Tensor self) -> Tensor
at::Tensor detach_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_detach_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<slice_copy_Tensor::schema> create_slice_copy_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slice_copy_Tensor::name, slice_copy_Tensor::overload_name).typed<slice_copy_Tensor::schema>();
}

// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_copy_Tensor::call(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_slice_copy_Tensor_typed_handle();
    return typed_handle.call(self, dim, start, end, step);
}

// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
at::Tensor slice_copy_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_slice_copy_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, start, end, step);
}

// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<split_copy_Tensor::schema> create_split_copy_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(split_copy_Tensor::name, split_copy_Tensor::overload_name).typed<split_copy_Tensor::schema>();
}

// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_copy_Tensor::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_split_copy_Tensor_typed_handle();
    return typed_handle.call(self, split_size, dim);
}

// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_copy_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_split_copy_Tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, split_size, dim);
}

// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<split_with_sizes_copy::schema> create_split_with_sizes_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(split_with_sizes_copy::name, split_with_sizes_copy::overload_name).typed<split_with_sizes_copy::schema>();
}

// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_with_sizes_copy::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_split_with_sizes_copy_typed_handle();
    return typed_handle.call(self, split_sizes, dim);
}

// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
::std::vector<at::Tensor> split_with_sizes_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_split_with_sizes_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, split_sizes, dim);
}

// aten::squeeze_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy::schema> create_squeeze_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(squeeze_copy::name, squeeze_copy::overload_name).typed<squeeze_copy::schema>();
}

// aten::squeeze_copy(Tensor self) -> Tensor
at::Tensor squeeze_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_squeeze_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::squeeze_copy(Tensor self) -> Tensor
at::Tensor squeeze_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_squeeze_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dim::schema> create_squeeze_copy_dim_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(squeeze_copy_dim::name, squeeze_copy_dim::overload_name).typed<squeeze_copy_dim::schema>();
}

// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
at::Tensor squeeze_copy_dim::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_squeeze_copy_dim_typed_handle();
    return typed_handle.call(self, dim);
}

// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
at::Tensor squeeze_copy_dim::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_squeeze_copy_dim_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dims::schema> create_squeeze_copy_dims_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(squeeze_copy_dims::name, squeeze_copy_dims::overload_name).typed<squeeze_copy_dims::schema>();
}

// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
at::Tensor squeeze_copy_dims::call(const at::Tensor & self, at::IntArrayRef dim) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_squeeze_copy_dims_typed_handle();
    return typed_handle.call(self, dim);
}

// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
at::Tensor squeeze_copy_dims::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_squeeze_copy_dims_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::t_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<t_copy::schema> create_t_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(t_copy::name, t_copy::overload_name).typed<t_copy::schema>();
}

// aten::t_copy(Tensor self) -> Tensor
at::Tensor t_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_t_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::t_copy(Tensor self) -> Tensor
at::Tensor t_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_t_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<transpose_copy_int::schema> create_transpose_copy_int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(transpose_copy_int::name, transpose_copy_int::overload_name).typed<transpose_copy_int::schema>();
}

// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
at::Tensor transpose_copy_int::call(const at::Tensor & self, int64_t dim0, int64_t dim1) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_transpose_copy_int_typed_handle();
    return typed_handle.call(self, dim0, dim1);
}

// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
at::Tensor transpose_copy_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_transpose_copy_int_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim0, dim1);
}

// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze_copy::schema> create_unsqueeze_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unsqueeze_copy::name, unsqueeze_copy::overload_name).typed<unsqueeze_copy::schema>();
}

// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
at::Tensor unsqueeze_copy::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_unsqueeze_copy_typed_handle();
    return typed_handle.call(self, dim);
}

// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
at::Tensor unsqueeze_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_unsqueeze_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::_indices_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_indices_copy::schema> create__indices_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_indices_copy::name, _indices_copy::overload_name).typed<_indices_copy::schema>();
}

// aten::_indices_copy(Tensor self) -> Tensor
at::Tensor _indices_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__indices_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::_indices_copy(Tensor self) -> Tensor
at::Tensor _indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__indices_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::_values_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_values_copy::schema> create__values_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_values_copy::name, _values_copy::overload_name).typed<_values_copy::schema>();
}

// aten::_values_copy(Tensor self) -> Tensor
at::Tensor _values_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__values_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::_values_copy(Tensor self) -> Tensor
at::Tensor _values_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__values_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::indices_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<indices_copy::schema> create_indices_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(indices_copy::name, indices_copy::overload_name).typed<indices_copy::schema>();
}

// aten::indices_copy(Tensor self) -> Tensor
at::Tensor indices_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_indices_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::indices_copy(Tensor self) -> Tensor
at::Tensor indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_indices_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::values_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<values_copy::schema> create_values_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(values_copy::name, values_copy::overload_name).typed<values_copy::schema>();
}

// aten::values_copy(Tensor self) -> Tensor
at::Tensor values_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_values_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::values_copy(Tensor self) -> Tensor
at::Tensor values_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_values_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::crow_indices_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<crow_indices_copy::schema> create_crow_indices_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(crow_indices_copy::name, crow_indices_copy::overload_name).typed<crow_indices_copy::schema>();
}

// aten::crow_indices_copy(Tensor self) -> Tensor
at::Tensor crow_indices_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_crow_indices_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::crow_indices_copy(Tensor self) -> Tensor
at::Tensor crow_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_crow_indices_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::col_indices_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<col_indices_copy::schema> create_col_indices_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(col_indices_copy::name, col_indices_copy::overload_name).typed<col_indices_copy::schema>();
}

// aten::col_indices_copy(Tensor self) -> Tensor
at::Tensor col_indices_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_col_indices_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::col_indices_copy(Tensor self) -> Tensor
at::Tensor col_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_col_indices_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::ccol_indices_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<ccol_indices_copy::schema> create_ccol_indices_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ccol_indices_copy::name, ccol_indices_copy::overload_name).typed<ccol_indices_copy::schema>();
}

// aten::ccol_indices_copy(Tensor self) -> Tensor
at::Tensor ccol_indices_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_ccol_indices_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::ccol_indices_copy(Tensor self) -> Tensor
at::Tensor ccol_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_ccol_indices_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::row_indices_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<row_indices_copy::schema> create_row_indices_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(row_indices_copy::name, row_indices_copy::overload_name).typed<row_indices_copy::schema>();
}

// aten::row_indices_copy(Tensor self) -> Tensor
at::Tensor row_indices_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_row_indices_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::row_indices_copy(Tensor self) -> Tensor
at::Tensor row_indices_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_row_indices_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<unbind_copy_int::schema> create_unbind_copy_int_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unbind_copy_int::name, unbind_copy_int::overload_name).typed<unbind_copy_int::schema>();
}

// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unbind_copy_int::call(const at::Tensor & self, int64_t dim) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_unbind_copy_int_typed_handle();
    return typed_handle.call(self, dim);
}

// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
::std::vector<at::Tensor> unbind_copy_int::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_unbind_copy_int_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim);
}

// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<unbind_copy_int_out::schema> create_unbind_copy_int_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unbind_copy_int_out::name, unbind_copy_int_out::overload_name).typed<unbind_copy_int_out::schema>();
}

// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
void unbind_copy_int_out::call(const at::Tensor & self, int64_t dim, at::TensorList out) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_unbind_copy_int_out_typed_handle();
    return typed_handle.call(self, dim, out);
}

// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
void unbind_copy_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_unbind_copy_int_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dim, out);
}

// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<split_copy_Tensor_out::schema> create_split_copy_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(split_copy_Tensor_out::name, split_copy_Tensor_out::overload_name).typed<split_copy_Tensor_out::schema>();
}

// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
void split_copy_Tensor_out::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_split_copy_Tensor_out_typed_handle();
    return typed_handle.call(self, split_size, dim, out);
}

// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
void split_copy_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_split_copy_Tensor_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, split_size, dim, out);
}

// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<split_with_sizes_copy_out::schema> create_split_with_sizes_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(split_with_sizes_copy_out::name, split_with_sizes_copy_out::overload_name).typed<split_with_sizes_copy_out::schema>();
}

// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
void split_with_sizes_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_split_with_sizes_copy_out_typed_handle();
    return typed_handle.call(self, split_sizes, dim, out);
}

// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
void split_with_sizes_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_split_with_sizes_copy_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, split_sizes, dim, out);
}

// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<view_copy::schema> create_view_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(view_copy::name, view_copy::overload_name).typed<view_copy::schema>();
}

// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
at::Tensor view_copy::call(const at::Tensor & self, c10::SymIntArrayRef size) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_view_copy_typed_handle();
    return typed_handle.call(self, size);
}

// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
at::Tensor view_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_view_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, size);
}

// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<view_copy_dtype::schema> create_view_copy_dtype_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(view_copy_dtype::name, view_copy_dtype::overload_name).typed<view_copy_dtype::schema>();
}

// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
at::Tensor view_copy_dtype::call(const at::Tensor & self, at::ScalarType dtype) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_view_copy_dtype_typed_handle();
    return typed_handle.call(self, dtype);
}

// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
at::Tensor view_copy_dtype::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_view_copy_dtype_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dtype);
}

// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<unfold_copy::schema> create_unfold_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(unfold_copy::name, unfold_copy::overload_name).typed<unfold_copy::schema>();
}

// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
at::Tensor unfold_copy::call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_unfold_copy_typed_handle();
    return typed_handle.call(self, dimension, size, step);
}

// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
at::Tensor unfold_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_unfold_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, dimension, size, step);
}

// aten::alias_copy(Tensor self) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<alias_copy::schema> create_alias_copy_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(alias_copy::name, alias_copy::overload_name).typed<alias_copy::schema>();
}

// aten::alias_copy(Tensor self) -> Tensor
at::Tensor alias_copy::call(const at::Tensor & self) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_alias_copy_typed_handle();
    return typed_handle.call(self);
}

// aten::alias_copy(Tensor self) -> Tensor
at::Tensor alias_copy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_alias_copy_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self);
}

// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<to_padded_tensor::schema> create_to_padded_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(to_padded_tensor::name, to_padded_tensor::overload_name).typed<to_padded_tensor::schema>();
}

// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
at::Tensor to_padded_tensor::call(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create_to_padded_tensor_typed_handle();
    return typed_handle.call(self, padding, output_size);
}

// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor
at::Tensor to_padded_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create_to_padded_tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, padding, output_size);
}

// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_jagged_to_padded_dense_forward::schema> create__jagged_to_padded_dense_forward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_jagged_to_padded_dense_forward::name, _jagged_to_padded_dense_forward::overload_name).typed<_jagged_to_padded_dense_forward::schema>();
}

// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor
at::Tensor _jagged_to_padded_dense_forward::call(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__jagged_to_padded_dense_forward_typed_handle();
    return typed_handle.call(values, offsets, max_lengths, padding_value);
}

// aten::_jagged_to_padded_dense_forward(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value=0.0) -> Tensor
at::Tensor _jagged_to_padded_dense_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__jagged_to_padded_dense_forward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, values, offsets, max_lengths, padding_value);
}

// aten::_padded_dense_to_jagged_forward(Tensor dense, Tensor[] offsets, SymInt? total_L=None) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_padded_dense_to_jagged_forward::schema> create__padded_dense_to_jagged_forward_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_padded_dense_to_jagged_forward::name, _padded_dense_to_jagged_forward::overload_name).typed<_padded_dense_to_jagged_forward::schema>();
}

// aten::_padded_dense_to_jagged_forward(Tensor dense, Tensor[] offsets, SymInt? total_L=None) -> Tensor
at::Tensor _padded_dense_to_jagged_forward::call(const at::Tensor & dense, at::TensorList offsets, ::std::optional<c10::SymInt> total_L) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__padded_dense_to_jagged_forward_typed_handle();
    return typed_handle.call(dense, offsets, total_L);
}

// aten::_padded_dense_to_jagged_forward(Tensor dense, Tensor[] offsets, SymInt? total_L=None) -> Tensor
at::Tensor _padded_dense_to_jagged_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dense, at::TensorList offsets, ::std::optional<c10::SymInt> total_L) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__padded_dense_to_jagged_forward_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, dense, offsets, total_L);
}

// aten::_nested_from_padded_tensor(Tensor padded, Tensor offsets, Tensor dummy, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, SymInt? sum_S=None) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_tensor::schema> create__nested_from_padded_tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_from_padded_tensor::name, _nested_from_padded_tensor::overload_name).typed<_nested_from_padded_tensor::schema>();
}

// aten::_nested_from_padded_tensor(Tensor padded, Tensor offsets, Tensor dummy, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, SymInt? sum_S=None) -> Tensor
at::Tensor _nested_from_padded_tensor::call(const at::Tensor & padded, const at::Tensor & offsets, const at::Tensor & dummy, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen, ::std::optional<c10::SymInt> sum_S) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__nested_from_padded_tensor_typed_handle();
    return typed_handle.call(padded, offsets, dummy, ragged_idx, min_seqlen, max_seqlen, sum_S);
}

// aten::_nested_from_padded_tensor(Tensor padded, Tensor offsets, Tensor dummy, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, SymInt? sum_S=None) -> Tensor
at::Tensor _nested_from_padded_tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & offsets, const at::Tensor & dummy, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen, ::std::optional<c10::SymInt> sum_S) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__nested_from_padded_tensor_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, padded, offsets, dummy, ragged_idx, min_seqlen, max_seqlen, sum_S);
}

// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
// Looks up this operator's schema in the dispatcher singleton and returns a
// handle typed to its C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_softmax_with_shape::schema> create__nested_tensor_softmax_with_shape_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_softmax_with_shape::name, _nested_tensor_softmax_with_shape::overload_name).typed<_nested_tensor_softmax_with_shape::schema>();
}

// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
at::Tensor _nested_tensor_softmax_with_shape::call(const at::Tensor & self, const at::Tensor & query) {
    // Handle is resolved once on first use and cached in a function-local static.
    static auto typed_handle = create__nested_tensor_softmax_with_shape_typed_handle();
    return typed_handle.call(self, query);
}

// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
at::Tensor _nested_tensor_softmax_with_shape::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & query) {
    // Forwards the caller-supplied dispatch key set through the cached handle.
    static auto typed_handle = create__nested_tensor_softmax_with_shape_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, query);
}

// aten::_safe_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_safe_softmax::schema> create__safe_softmax_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_safe_softmax::name, _safe_softmax::overload_name)
      .typed<_safe_softmax::schema>();
}

// aten::_safe_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _safe_softmax::call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__safe_softmax_typed_handle();
    return op.call(self, dim, dtype);
}

// aten::_safe_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
at::Tensor _safe_softmax::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__safe_softmax_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, dtype);
}

// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_transformer_encoder_layer_fwd::schema> create__transformer_encoder_layer_fwd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transformer_encoder_layer_fwd::name, _transformer_encoder_layer_fwd::overload_name)
      .typed<_transformer_encoder_layer_fwd::schema>();
}

// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
at::Tensor _transformer_encoder_layer_fwd::call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__transformer_encoder_layer_fwd_typed_handle();
    return op.call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}

// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
at::Tensor _transformer_encoder_layer_fwd::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__transformer_encoder_layer_fwd_typed_handle();
    return op.redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
}

// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_native_multi_head_attention::schema> create__native_multi_head_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_native_multi_head_attention::name, _native_multi_head_attention::overload_name)
      .typed<_native_multi_head_attention::schema>();
}

// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__native_multi_head_attention_typed_handle();
    return op.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}

// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__native_multi_head_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
}

// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<scaled_dot_product_attention::schema> create_scaled_dot_product_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(scaled_dot_product_attention::name, scaled_dot_product_attention::overload_name)
      .typed<scaled_dot_product_attention::schema>();
}

// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
at::Tensor scaled_dot_product_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create_scaled_dot_product_attention_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}

// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
at::Tensor scaled_dot_product_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create_scaled_dot_product_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}

// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> int
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sdp_choice::schema> create__fused_sdp_choice_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sdp_choice::name, _fused_sdp_choice::overload_name)
      .typed<_fused_sdp_choice::schema>();
}

// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> int
int64_t _fused_sdp_choice::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__fused_sdp_choice_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}

// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> int
int64_t _fused_sdp_choice::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, ::std::optional<double> scale, bool enable_gqa) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__fused_sdp_choice_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
}

// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None, bool enable_gqa=False) -> (Tensor, Tensor)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_attention_math::schema> create__scaled_dot_product_attention_math_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_attention_math::name, _scaled_dot_product_attention_math::overload_name)
      .typed<_scaled_dot_product_attention_math::schema>();
}

// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None, bool enable_gqa=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale, bool enable_gqa) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_attention_math_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa);
}

// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None, bool enable_gqa=False) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale, bool enable_gqa) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_attention_math_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa);
}

// aten::_scaled_dot_product_attention_math_for_mps(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_attention_math_for_mps::schema> create__scaled_dot_product_attention_math_for_mps_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_attention_math_for_mps::name, _scaled_dot_product_attention_math_for_mps::overload_name)
      .typed<_scaled_dot_product_attention_math_for_mps::schema>();
}

// aten::_scaled_dot_product_attention_math_for_mps(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_for_mps::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_attention_math_for_mps_typed_handle();
    return op.call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale);
}

// aten::_scaled_dot_product_attention_math_for_mps(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None) -> (Tensor, Tensor)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_for_mps::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_attention_math_for_mps_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale);
}

// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_flash_attention::schema> create__scaled_dot_product_flash_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_flash_attention::name, _scaled_dot_product_flash_attention::overload_name)
      .typed<_scaled_dot_product_flash_attention::schema>();
}

// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_flash_attention_typed_handle();
    return op.call(query, key, value, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_flash_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_flash_attention_for_cpu::schema> create__scaled_dot_product_flash_attention_for_cpu_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_flash_attention_for_cpu::name, _scaled_dot_product_flash_attention_for_cpu::overload_name)
      .typed<_scaled_dot_product_flash_attention_for_cpu::schema>();
}

// aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_flash_attention_for_cpu_typed_handle();
    return op.call(query, key, value, dropout_p, is_causal, attn_mask, scale);
}

// aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_flash_attention_for_cpu_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, dropout_p, is_causal, attn_mask, scale);
}

// aten::_scaled_dot_product_fused_attention_overrideable(Tensor query, Tensor key, Tensor value, Tensor? attn_bias=None, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_fused_attention_overrideable::schema> create__scaled_dot_product_fused_attention_overrideable_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_fused_attention_overrideable::name, _scaled_dot_product_fused_attention_overrideable::overload_name)
      .typed<_scaled_dot_product_fused_attention_overrideable::schema>();
}

// aten::_scaled_dot_product_fused_attention_overrideable(Tensor query, Tensor key, Tensor value, Tensor? attn_bias=None, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_fused_attention_overrideable_typed_handle();
    return op.call(query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_fused_attention_overrideable(Tensor query, Tensor key, Tensor value, Tensor? attn_bias=None, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_fused_attention_overrideable_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_bias, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_flash_attention_backward::schema> create__scaled_dot_product_flash_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_flash_attention_backward::name, _scaled_dot_product_flash_attention_backward::overload_name)
      .typed<_scaled_dot_product_flash_attention_backward::schema>();
}

// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_flash_attention_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
}

// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_flash_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
}

// aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
// One-time dispatcher lookup; findSchemaOrThrow throws if the op was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_flash_attention_for_cpu_backward::schema> create__scaled_dot_product_flash_attention_for_cpu_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_flash_attention_for_cpu_backward::name, _scaled_dot_product_flash_attention_for_cpu_backward::overload_name)
      .typed<_scaled_dot_product_flash_attention_for_cpu_backward::schema>();
}

// aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
    
    // Handle resolved once (thread-safe local static) and cached.
    static auto op = create__scaled_dot_product_flash_attention_for_cpu_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
}

// aten::_scaled_dot_product_flash_attention_for_cpu_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, float dropout_p, bool is_causal, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
    
    // Re-enters the dispatcher with the caller-supplied DispatchKeySet.
    static auto op = create__scaled_dot_product_flash_attention_for_cpu_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
}

// aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_fused_attention_overrideable_backward::schema> create__scaled_dot_product_fused_attention_overrideable_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_fused_attention_overrideable_backward::name, _scaled_dot_product_fused_attention_overrideable_backward::overload_name)
      .typed<_scaled_dot_product_fused_attention_overrideable_backward::schema>();
}

// aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_fused_attention_overrideable_backward_typed_handle();
    return op.call(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
}

// aten::_scaled_dot_product_fused_attention_overrideable_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor attn_bias, bool[4] grad_input_mask, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value, Tensor grad_attn_bias)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_fused_attention_overrideable_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
}

// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_efficient_attention::schema> create__scaled_dot_product_efficient_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_efficient_attention::name, _scaled_dot_product_efficient_attention::overload_name)
      .typed<_scaled_dot_product_efficient_attention::schema>();
}

// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_efficient_attention_typed_handle();
    return op.call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale);
}

// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_efficient_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale);
}

// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_efficient_attention_backward::schema> create__scaled_dot_product_efficient_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_efficient_attention_backward::name, _scaled_dot_product_efficient_attention_backward::overload_name)
      .typed<_scaled_dot_product_efficient_attention_backward::schema>();
}

// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward::call(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_efficient_attention_backward_typed_handle();
    return op.call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale);
}

// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_efficient_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale);
}

// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_cudnn_attention::schema> create__scaled_dot_product_cudnn_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_cudnn_attention::name, _scaled_dot_product_cudnn_attention::overload_name)
      .typed<_scaled_dot_product_cudnn_attention::schema>();
}

// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_cudnn_attention_typed_handle();
    return op.call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_cudnn_attention_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_scaled_dot_product_cudnn_attention_backward::schema> create__scaled_dot_product_cudnn_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_scaled_dot_product_cudnn_attention_backward::name, _scaled_dot_product_cudnn_attention_backward::overload_name)
      .typed<_scaled_dot_product_cudnn_attention_backward::schema>();
}

// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_cudnn_attention_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
}

// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale) {
    
    static auto op = create__scaled_dot_product_cudnn_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
}

// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None, Tensor? seqused_k=None, Tensor? alibi_slopes=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_flash_attention_forward::schema> create__flash_attention_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_flash_attention_forward::name, _flash_attention_forward::overload_name)
      .typed<_flash_attention_forward::schema>();
}

// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None, Tensor? seqused_k=None, Tensor? alibi_slopes=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale, ::std::optional<c10::SymInt> window_size_left, ::std::optional<c10::SymInt> window_size_right, const ::std::optional<at::Tensor> & seqused_k, const ::std::optional<at::Tensor> & alibi_slopes) {
    
    static auto op = create__flash_attention_forward_typed_handle();
    return op.call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes);
}

// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, bool return_debug_mask, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None, Tensor? seqused_k=None, Tensor? alibi_slopes=None) -> (Tensor output, Tensor softmax_logsumexp, Tensor rng_state, Tensor unused, Tensor debug_attn_mask)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale, ::std::optional<c10::SymInt> window_size_left, ::std::optional<c10::SymInt> window_size_right, const ::std::optional<at::Tensor> & seqused_k, const ::std::optional<at::Tensor> & alibi_slopes) {
    
    static auto op = create__flash_attention_forward_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes);
}

// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor rng_state, Tensor unused, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None) -> (Tensor, Tensor, Tensor)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_flash_attention_backward::schema> create__flash_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_flash_attention_backward::name, _flash_attention_backward::overload_name)
      .typed<_flash_attention_backward::schema>();
}

// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor rng_state, Tensor unused, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None) -> (Tensor, Tensor, Tensor)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward::call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & rng_state, const at::Tensor & unused, ::std::optional<double> scale, ::std::optional<c10::SymInt> window_size_left, ::std::optional<c10::SymInt> window_size_right) {
    
    static auto op = create__flash_attention_backward_typed_handle();
    return op.call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, rng_state, unused, scale, window_size_left, window_size_right);
}

// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor rng_state, Tensor unused, *, float? scale=None, SymInt? window_size_left=None, SymInt? window_size_right=None) -> (Tensor, Tensor, Tensor)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & rng_state, const at::Tensor & unused, ::std::optional<double> scale, ::std::optional<c10::SymInt> window_size_left, ::std::optional<c10::SymInt> window_size_right) {
    
    static auto op = create__flash_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, rng_state, unused, scale, window_size_left, window_size_right);
}

// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt? max_seqlen_q, SymInt? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? seqlen_k=None, int? window_size=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_efficient_attention_forward::schema> create__efficient_attention_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_efficient_attention_forward::name, _efficient_attention_forward::overload_name)
      .typed<_efficient_attention_forward::schema>();
}

// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt? max_seqlen_q, SymInt? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? seqlen_k=None, int? window_size=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> _efficient_attention_forward::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, ::std::optional<c10::SymInt> max_seqlen_q, ::std::optional<c10::SymInt> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp, ::std::optional<double> scale, const ::std::optional<at::Tensor> & seqlen_k, ::std::optional<int64_t> window_size) {
    
    static auto op = create__efficient_attention_forward_typed_handle();
    return op.call(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size);
}

// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt? max_seqlen_q, SymInt? max_seqlen_k, float dropout_p, int custom_mask_type, bool compute_log_sumexp=False, *, float? scale=None, Tensor? seqlen_k=None, int? window_size=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, SymInt max_seqlen_batch_q, SymInt max_seqlen_batch_k)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> _efficient_attention_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, ::std::optional<c10::SymInt> max_seqlen_q, ::std::optional<c10::SymInt> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp, ::std::optional<double> scale, const ::std::optional<at::Tensor> & seqlen_k, ::std::optional<int64_t> window_size) {
    
    static auto op = create__efficient_attention_forward_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, custom_mask_type, compute_log_sumexp, scale, seqlen_k, window_size);
}

// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None, int? window_size=None, bool shared_storage_dqdkdv=False) -> (Tensor, Tensor, Tensor, Tensor)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_efficient_attention_backward::schema> create__efficient_attention_backward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_efficient_attention_backward::name, _efficient_attention_backward::overload_name)
      .typed<_efficient_attention_backward::schema>();
}

// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None, int? window_size=None, bool shared_storage_dqdkdv=False) -> (Tensor, Tensor, Tensor, Tensor)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward::call(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const at::Tensor & out, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale, ::std::optional<int64_t> num_splits_key, ::std::optional<int64_t> window_size, bool shared_storage_dqdkdv) {
    
    static auto op = create__efficient_attention_backward_typed_handle();
    return op.call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv);
}

// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None, int? window_size=None, bool shared_storage_dqdkdv=False) -> (Tensor, Tensor, Tensor, Tensor)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const at::Tensor & out, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale, ::std::optional<int64_t> num_splits_key, ::std::optional<int64_t> window_size, bool shared_storage_dqdkdv) {
    
    static auto op = create__efficient_attention_backward_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv);
}

// aten::_cudnn_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_attention_forward::schema> create__cudnn_attention_forward_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_attention_forward::name, _cudnn_attention_forward::overload_name)
      .typed<_cudnn_attention_forward::schema>();
}

// aten::_cudnn_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _cudnn_attention_forward::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    static auto op = create__cudnn_attention_forward_typed_handle();
    return op.call(query, key, value, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_cudnn_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, Tensor? cum_seq_q, Tensor? cum_seq_k, SymInt max_q, SymInt max_k, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _cudnn_attention_forward::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
    
    static auto op = create__cudnn_attention_forward_typed_handle();
    return op.redispatch(dispatchKeySet, query, key, value, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
}

// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_triton_scaled_dot_attention::schema> create__triton_scaled_dot_attention_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_triton_scaled_dot_attention::name, _triton_scaled_dot_attention::overload_name)
      .typed<_triton_scaled_dot_attention::schema>();
}

// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
// Public entry point: forwards all arguments to the dispatcher. The typed handle
// is resolved once (function-local static) and cached for subsequent calls.
at::Tensor _triton_scaled_dot_attention::call(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
    
    static auto op = create__triton_scaled_dot_attention_typed_handle();
    return op.call(q, k, v, dropout_p);
}

// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
at::Tensor _triton_scaled_dot_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
    
    static auto op = create__triton_scaled_dot_attention_typed_handle();
    return op.redispatch(dispatchKeySet, q, k, v, dropout_p);
}

// aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)
// Resolves this op's dispatcher entry by (name, overload_name) — throwing if the
// schema was never registered — and returns a handle typed to its C++ signature.
// C10_NOINLINE keeps this cold, one-time lookup out of the hot call paths.
static C10_NOINLINE c10::TypedOperatorHandle<_fill_mem_eff_dropout_mask_::schema> create__fill_mem_eff_dropout_mask__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fill_mem_eff_dropout_mask_::name, _fill_mem_eff_dropout_mask_::overload_name)
      .typed<_fill_mem_eff_dropout_mask_::schema>();
}

// aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)
// Public entry point for the in-place op (schema marks `self` as Tensor(a!)):
// forwards to the dispatcher and returns the mutated `self` by reference.
// The typed handle is resolved once (function-local static) and cached.
at::Tensor & _fill_mem_eff_dropout_mask_::call(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) {
    
    static auto op = create__fill_mem_eff_dropout_mask__typed_handle();
    return op.call(self, dropout_p, seed, offset);
}

// aten::_fill_mem_eff_dropout_mask_(Tensor(a!) self, float dropout_p, int seed, int offset) -> Tensor(a!)
// Like ::call, but dispatches with the caller-supplied DispatchKeySet instead of
// recomputing it — used by kernels that need to forward to a lower dispatch key.
at::Tensor & _fill_mem_eff_dropout_mask_::redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) {
    
    static auto op = create__fill_mem_eff_dropout_mask__typed_handle();
    return op.redispatch(dispatchKeySet, self, dropout_p, seed, offset);
}

// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<_triton_multi_head_attention::schema> create__triton_multi_head_attention_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_triton_multi_head_attention::name, _triton_multi_head_attention::overload_name).typed<_triton_multi_head_attention::schema>();
}

// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
at::Tensor _triton_multi_head_attention::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create__triton_multi_head_attention_typed_handle();
    return handle.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
}

// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
at::Tensor _triton_multi_head_attention::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create__triton_multi_head_attention_typed_handle();
    return handle.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
}

// aten::special_airy_ai(Tensor x) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_airy_ai::schema> create_special_airy_ai_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_airy_ai::name, special_airy_ai::overload_name).typed<special_airy_ai::schema>();
}

// aten::special_airy_ai(Tensor x) -> Tensor
at::Tensor special_airy_ai::call(const at::Tensor & x) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_airy_ai_typed_handle();
    return handle.call(x);
}

// aten::special_airy_ai(Tensor x) -> Tensor
at::Tensor special_airy_ai::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_airy_ai_typed_handle();
    return handle.redispatch(dispatchKeySet, x);
}

// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_airy_ai_out::schema> create_special_airy_ai_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_airy_ai_out::name, special_airy_ai_out::overload_name).typed<special_airy_ai_out::schema>();
}

// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_airy_ai_out::call(const at::Tensor & x, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_airy_ai_out_typed_handle();
    return handle.call(x, out);
}

// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_airy_ai_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_airy_ai_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, out);
}

// aten::special_bessel_j0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j0::schema> create_special_bessel_j0_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_j0::name, special_bessel_j0::overload_name).typed<special_bessel_j0::schema>();
}

// aten::special_bessel_j0(Tensor self) -> Tensor
at::Tensor special_bessel_j0::call(const at::Tensor & self) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_j0_typed_handle();
    return handle.call(self);
}

// aten::special_bessel_j0(Tensor self) -> Tensor
at::Tensor special_bessel_j0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_j0_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j0_out::schema> create_special_bessel_j0_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_j0_out::name, special_bessel_j0_out::overload_name).typed<special_bessel_j0_out::schema>();
}

// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j0_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_j0_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_j0_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_bessel_j1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j1::schema> create_special_bessel_j1_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_j1::name, special_bessel_j1::overload_name).typed<special_bessel_j1::schema>();
}

// aten::special_bessel_j1(Tensor self) -> Tensor
at::Tensor special_bessel_j1::call(const at::Tensor & self) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_j1_typed_handle();
    return handle.call(self);
}

// aten::special_bessel_j1(Tensor self) -> Tensor
at::Tensor special_bessel_j1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_j1_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_j1_out::schema> create_special_bessel_j1_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_j1_out::name, special_bessel_j1_out::overload_name).typed<special_bessel_j1_out::schema>();
}

// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j1_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_j1_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_j1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_j1_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_bessel_y0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y0::schema> create_special_bessel_y0_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_y0::name, special_bessel_y0::overload_name).typed<special_bessel_y0::schema>();
}

// aten::special_bessel_y0(Tensor self) -> Tensor
at::Tensor special_bessel_y0::call(const at::Tensor & self) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_y0_typed_handle();
    return handle.call(self);
}

// aten::special_bessel_y0(Tensor self) -> Tensor
at::Tensor special_bessel_y0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_y0_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y0_out::schema> create_special_bessel_y0_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_y0_out::name, special_bessel_y0_out::overload_name).typed<special_bessel_y0_out::schema>();
}

// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_y0_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_y0_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_y0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_y0_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_bessel_y1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y1::schema> create_special_bessel_y1_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_y1::name, special_bessel_y1::overload_name).typed<special_bessel_y1::schema>();
}

// aten::special_bessel_y1(Tensor self) -> Tensor
at::Tensor special_bessel_y1::call(const at::Tensor & self) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_y1_typed_handle();
    return handle.call(self);
}

// aten::special_bessel_y1(Tensor self) -> Tensor
at::Tensor special_bessel_y1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_y1_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_bessel_y1_out::schema> create_special_bessel_y1_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_bessel_y1_out::name, special_bessel_y1_out::overload_name).typed<special_bessel_y1_out::schema>();
}

// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_y1_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_bessel_y1_out_typed_handle();
    return handle.call(self, out);
}

// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_bessel_y1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_bessel_y1_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t::schema> create_special_chebyshev_polynomial_t_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_t::name, special_chebyshev_polynomial_t::overload_name).typed<special_chebyshev_polynomial_t::schema>();
}

// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_t_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_t_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_x_scalar::schema> create_special_chebyshev_polynomial_t_x_scalar_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_t_x_scalar::name, special_chebyshev_polynomial_t_x_scalar::overload_name).typed<special_chebyshev_polynomial_t_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_t_x_scalar_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_t_x_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_n_scalar::schema> create_special_chebyshev_polynomial_t_n_scalar_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_t_n_scalar::name, special_chebyshev_polynomial_t_n_scalar::overload_name).typed<special_chebyshev_polynomial_t_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_t_n_scalar_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_t_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_t_n_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_out::schema> create_special_chebyshev_polynomial_t_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_t_out::name, special_chebyshev_polynomial_t_out::overload_name).typed<special_chebyshev_polynomial_t_out::schema>();
}

// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_t_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_t_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_x_scalar_out::schema> create_special_chebyshev_polynomial_t_x_scalar_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_t_x_scalar_out::name, special_chebyshev_polynomial_t_x_scalar_out::overload_name).typed<special_chebyshev_polynomial_t_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_t_n_scalar_out::schema> create_special_chebyshev_polynomial_t_n_scalar_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_t_n_scalar_out::name, special_chebyshev_polynomial_t_n_scalar_out::overload_name).typed<special_chebyshev_polynomial_t_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_t_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u::schema> create_special_chebyshev_polynomial_u_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_u::name, special_chebyshev_polynomial_u::overload_name).typed<special_chebyshev_polynomial_u::schema>();
}

// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_u_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_u_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_x_scalar::schema> create_special_chebyshev_polynomial_u_x_scalar_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_u_x_scalar::name, special_chebyshev_polynomial_u_x_scalar::overload_name).typed<special_chebyshev_polynomial_u_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_u_x_scalar_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_u_x_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_n_scalar::schema> create_special_chebyshev_polynomial_u_n_scalar_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_u_n_scalar::name, special_chebyshev_polynomial_u_n_scalar::overload_name).typed<special_chebyshev_polynomial_u_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_u_n_scalar_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_u_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_u_n_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_out::schema> create_special_chebyshev_polynomial_u_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_u_out::name, special_chebyshev_polynomial_u_out::overload_name).typed<special_chebyshev_polynomial_u_out::schema>();
}

// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_u_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_u_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_x_scalar_out::schema> create_special_chebyshev_polynomial_u_x_scalar_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_u_x_scalar_out::name, special_chebyshev_polynomial_u_x_scalar_out::overload_name).typed<special_chebyshev_polynomial_u_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_u_n_scalar_out::schema> create_special_chebyshev_polynomial_u_n_scalar_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_u_n_scalar_out::name, special_chebyshev_polynomial_u_n_scalar_out::overload_name).typed<special_chebyshev_polynomial_u_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_u_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v::schema> create_special_chebyshev_polynomial_v_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_v::name, special_chebyshev_polynomial_v::overload_name).typed<special_chebyshev_polynomial_v::schema>();
}

// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_v_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_v_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_x_scalar::schema> create_special_chebyshev_polynomial_v_x_scalar_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_v_x_scalar::name, special_chebyshev_polynomial_v_x_scalar::overload_name).typed<special_chebyshev_polynomial_v_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_v_x_scalar_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_v_x_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_n_scalar::schema> create_special_chebyshev_polynomial_v_n_scalar_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_v_n_scalar::name, special_chebyshev_polynomial_v_n_scalar::overload_name).typed<special_chebyshev_polynomial_v_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_v_n_scalar_typed_handle();
    return handle.call(x, n);
}

// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_v_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_v_n_scalar_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_out::schema> create_special_chebyshev_polynomial_v_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_v_out::name, special_chebyshev_polynomial_v_out::overload_name).typed<special_chebyshev_polynomial_v_out::schema>();
}

// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_v_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_v_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_x_scalar_out::schema> create_special_chebyshev_polynomial_v_x_scalar_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_v_x_scalar_out::name, special_chebyshev_polynomial_v_x_scalar_out::overload_name).typed<special_chebyshev_polynomial_v_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_v_n_scalar_out::schema> create_special_chebyshev_polynomial_v_n_scalar_out_typed_handle() {
  // Look up the operator schema in the dispatcher and bind it as a typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(special_chebyshev_polynomial_v_n_scalar_out::name, special_chebyshev_polynomial_v_n_scalar_out::overload_name).typed<special_chebyshev_polynomial_v_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Handle is resolved once, then cached for every subsequent call.
    static auto handle = create_special_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return handle.call(x, n, out);
}

// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_v_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Same cached-handle pattern, but forwards an explicit dispatch key set.
    static auto handle = create_special_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, x, n, out);
}

// NOTE(review): @generated torchgen dispatcher stubs for the six overloads of
// aten::special_chebyshev_polynomial_w (tensor/tensor, x_scalar, n_scalar, and
// their out= variants). Pattern per overload: create_*_typed_handle() resolves
// the operator by (name, overload_name) in the c10::Dispatcher singleton and
// returns a schema-typed handle (C10_NOINLINE: cold, one-time lookup);
// call()/redispatch() cache it in a function-local static and forward the
// arguments (redispatch threads an explicit DispatchKeySet through).
// Do not hand-edit: regenerated by torchgen/gen.py.
// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w::schema> create_special_chebyshev_polynomial_w_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w::name, special_chebyshev_polynomial_w::overload_name)
      .typed<special_chebyshev_polynomial_w::schema>();
}

// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w::call(const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_chebyshev_polynomial_w_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_chebyshev_polynomial_w_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_x_scalar::schema> create_special_chebyshev_polynomial_w_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_x_scalar::name, special_chebyshev_polynomial_w_x_scalar::overload_name)
      .typed<special_chebyshev_polynomial_w_x_scalar::schema>();
}

// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_n_scalar::schema> create_special_chebyshev_polynomial_w_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_n_scalar::name, special_chebyshev_polynomial_w_n_scalar::overload_name)
      .typed<special_chebyshev_polynomial_w_n_scalar::schema>();
}

// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_chebyshev_polynomial_w_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_out::schema> create_special_chebyshev_polynomial_w_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_out::name, special_chebyshev_polynomial_w_out::overload_name)
      .typed<special_chebyshev_polynomial_w_out::schema>();
}

// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_chebyshev_polynomial_w_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_chebyshev_polynomial_w_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_x_scalar_out::schema> create_special_chebyshev_polynomial_w_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_x_scalar_out::name, special_chebyshev_polynomial_w_x_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_w_x_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_chebyshev_polynomial_w_n_scalar_out::schema> create_special_chebyshev_polynomial_w_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_chebyshev_polynomial_w_n_scalar_out::name, special_chebyshev_polynomial_w_n_scalar_out::overload_name)
      .typed<special_chebyshev_polynomial_w_n_scalar_out::schema>();
}

// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_chebyshev_polynomial_w_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// NOTE(review): @generated torchgen dispatcher stubs for the six overloads of
// aten::special_hermite_polynomial_h (tensor/tensor, x_scalar, n_scalar, and
// their out= variants). Pattern per overload: create_*_typed_handle() resolves
// the operator by (name, overload_name) in the c10::Dispatcher singleton and
// returns a schema-typed handle (C10_NOINLINE: cold, one-time lookup);
// call()/redispatch() cache it in a function-local static and forward the
// arguments (redispatch threads an explicit DispatchKeySet through).
// Do not hand-edit: regenerated by torchgen/gen.py.
// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h::schema> create_special_hermite_polynomial_h_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_h::name, special_hermite_polynomial_h::overload_name)
      .typed<special_hermite_polynomial_h::schema>();
}

// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_h::call(const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_h_typed_handle();
    return op.call(x, n);
}

// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_h::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_h_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_x_scalar::schema> create_special_hermite_polynomial_h_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_h_x_scalar::name, special_hermite_polynomial_h_x_scalar::overload_name)
      .typed<special_hermite_polynomial_h_x_scalar::schema>();
}

// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_h_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_h_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_h_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_h_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_n_scalar::schema> create_special_hermite_polynomial_h_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_h_n_scalar::name, special_hermite_polynomial_h_n_scalar::overload_name)
      .typed<special_hermite_polynomial_h_n_scalar::schema>();
}

// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_hermite_polynomial_h_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_hermite_polynomial_h_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_hermite_polynomial_h_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_hermite_polynomial_h_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_out::schema> create_special_hermite_polynomial_h_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_h_out::name, special_hermite_polynomial_h_out::overload_name)
      .typed<special_hermite_polynomial_h_out::schema>();
}

// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_h_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_h_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_h_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_h_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_x_scalar_out::schema> create_special_hermite_polynomial_h_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_h_x_scalar_out::name, special_hermite_polynomial_h_x_scalar_out::overload_name)
      .typed<special_hermite_polynomial_h_x_scalar_out::schema>();
}

// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_h_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_h_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_h_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_h_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_h_n_scalar_out::schema> create_special_hermite_polynomial_h_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_h_n_scalar_out::name, special_hermite_polynomial_h_n_scalar_out::overload_name)
      .typed<special_hermite_polynomial_h_n_scalar_out::schema>();
}

// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_h_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_h_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_h_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_h_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// NOTE(review): @generated torchgen dispatcher stubs for the six overloads of
// aten::special_hermite_polynomial_he (tensor/tensor, x_scalar, n_scalar, and
// their out= variants). Pattern per overload: create_*_typed_handle() resolves
// the operator by (name, overload_name) in the c10::Dispatcher singleton and
// returns a schema-typed handle (C10_NOINLINE: cold, one-time lookup);
// call()/redispatch() cache it in a function-local static and forward the
// arguments (redispatch threads an explicit DispatchKeySet through).
// Do not hand-edit: regenerated by torchgen/gen.py.
// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he::schema> create_special_hermite_polynomial_he_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_he::name, special_hermite_polynomial_he::overload_name)
      .typed<special_hermite_polynomial_he::schema>();
}

// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_he::call(const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_he_typed_handle();
    return op.call(x, n);
}

// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_he::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_he_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_x_scalar::schema> create_special_hermite_polynomial_he_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_he_x_scalar::name, special_hermite_polynomial_he_x_scalar::overload_name)
      .typed<special_hermite_polynomial_he_x_scalar::schema>();
}

// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_he_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_he_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_hermite_polynomial_he_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_hermite_polynomial_he_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_n_scalar::schema> create_special_hermite_polynomial_he_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_he_n_scalar::name, special_hermite_polynomial_he_n_scalar::overload_name)
      .typed<special_hermite_polynomial_he_n_scalar::schema>();
}

// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_hermite_polynomial_he_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_hermite_polynomial_he_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_hermite_polynomial_he_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_hermite_polynomial_he_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_out::schema> create_special_hermite_polynomial_he_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_he_out::name, special_hermite_polynomial_he_out::overload_name)
      .typed<special_hermite_polynomial_he_out::schema>();
}

// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_he_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_he_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_he_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_he_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_x_scalar_out::schema> create_special_hermite_polynomial_he_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_he_x_scalar_out::name, special_hermite_polynomial_he_x_scalar_out::overload_name)
      .typed<special_hermite_polynomial_he_x_scalar_out::schema>();
}

// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_he_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_he_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_he_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_he_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_hermite_polynomial_he_n_scalar_out::schema> create_special_hermite_polynomial_he_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_hermite_polynomial_he_n_scalar_out::name, special_hermite_polynomial_he_n_scalar_out::overload_name)
      .typed<special_hermite_polynomial_he_n_scalar_out::schema>();
}

// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_he_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_he_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_hermite_polynomial_he_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_hermite_polynomial_he_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// NOTE(review): @generated torchgen dispatcher stubs for the six overloads of
// aten::special_laguerre_polynomial_l (tensor/tensor, x_scalar, n_scalar, and
// their out= variants). Pattern per overload: create_*_typed_handle() resolves
// the operator by (name, overload_name) in the c10::Dispatcher singleton and
// returns a schema-typed handle (C10_NOINLINE: cold, one-time lookup);
// call()/redispatch() cache it in a function-local static and forward the
// arguments (redispatch threads an explicit DispatchKeySet through).
// Do not hand-edit: regenerated by torchgen/gen.py.
// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l::schema> create_special_laguerre_polynomial_l_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l::name, special_laguerre_polynomial_l::overload_name)
      .typed<special_laguerre_polynomial_l::schema>();
}

// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
at::Tensor special_laguerre_polynomial_l::call(const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_laguerre_polynomial_l_typed_handle();
    return op.call(x, n);
}

// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
at::Tensor special_laguerre_polynomial_l::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_laguerre_polynomial_l_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_x_scalar::schema> create_special_laguerre_polynomial_l_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_x_scalar::name, special_laguerre_polynomial_l_x_scalar::overload_name)
      .typed<special_laguerre_polynomial_l_x_scalar::schema>();
}

// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_laguerre_polynomial_l_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_laguerre_polynomial_l_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_laguerre_polynomial_l_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_laguerre_polynomial_l_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_n_scalar::schema> create_special_laguerre_polynomial_l_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_n_scalar::name, special_laguerre_polynomial_l_n_scalar::overload_name)
      .typed<special_laguerre_polynomial_l_n_scalar::schema>();
}

// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_laguerre_polynomial_l_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_laguerre_polynomial_l_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_laguerre_polynomial_l_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_laguerre_polynomial_l_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_out::schema> create_special_laguerre_polynomial_l_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_out::name, special_laguerre_polynomial_l_out::overload_name)
      .typed<special_laguerre_polynomial_l_out::schema>();
}

// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_laguerre_polynomial_l_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_laguerre_polynomial_l_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_laguerre_polynomial_l_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_laguerre_polynomial_l_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_x_scalar_out::schema> create_special_laguerre_polynomial_l_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_x_scalar_out::name, special_laguerre_polynomial_l_x_scalar_out::overload_name)
      .typed<special_laguerre_polynomial_l_x_scalar_out::schema>();
}

// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_laguerre_polynomial_l_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_laguerre_polynomial_l_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_laguerre_polynomial_l_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_laguerre_polynomial_l_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_laguerre_polynomial_l_n_scalar_out::schema> create_special_laguerre_polynomial_l_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_laguerre_polynomial_l_n_scalar_out::name, special_laguerre_polynomial_l_n_scalar_out::overload_name)
      .typed<special_laguerre_polynomial_l_n_scalar_out::schema>();
}

// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_laguerre_polynomial_l_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_laguerre_polynomial_l_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_laguerre_polynomial_l_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_laguerre_polynomial_l_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// NOTE(review): @generated torchgen dispatcher stubs for the tensor/tensor,
// x_scalar, n_scalar, and out= overloads of aten::special_legendre_polynomial_p.
// Pattern per overload: create_*_typed_handle() resolves the operator by
// (name, overload_name) in the c10::Dispatcher singleton and returns a
// schema-typed handle (C10_NOINLINE: cold, one-time lookup); call()/redispatch()
// cache it in a function-local static and forward the arguments (redispatch
// threads an explicit DispatchKeySet through). Do not hand-edit: regenerated
// by torchgen/gen.py.
// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p::schema> create_special_legendre_polynomial_p_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p::name, special_legendre_polynomial_p::overload_name)
      .typed<special_legendre_polynomial_p::schema>();
}

// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
at::Tensor special_legendre_polynomial_p::call(const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_legendre_polynomial_p_typed_handle();
    return op.call(x, n);
}

// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
at::Tensor special_legendre_polynomial_p::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    
    static auto op = create_special_legendre_polynomial_p_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_x_scalar::schema> create_special_legendre_polynomial_p_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_x_scalar::name, special_legendre_polynomial_p_x_scalar::overload_name)
      .typed<special_legendre_polynomial_p_x_scalar::schema>();
}

// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_legendre_polynomial_p_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_legendre_polynomial_p_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_legendre_polynomial_p_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_legendre_polynomial_p_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_n_scalar::schema> create_special_legendre_polynomial_p_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_n_scalar::name, special_legendre_polynomial_p_n_scalar::overload_name)
      .typed<special_legendre_polynomial_p_n_scalar::schema>();
}

// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_legendre_polynomial_p_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_legendre_polynomial_p_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_legendre_polynomial_p_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_legendre_polynomial_p_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_out::schema> create_special_legendre_polynomial_p_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_out::name, special_legendre_polynomial_p_out::overload_name)
      .typed<special_legendre_polynomial_p_out::schema>();
}

// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_legendre_polynomial_p_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_legendre_polynomial_p_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_x_scalar_out::schema> create_special_legendre_polynomial_p_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_x_scalar_out::name, special_legendre_polynomial_p_x_scalar_out::overload_name)
      .typed<special_legendre_polynomial_p_x_scalar_out::schema>();
}

// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_legendre_polynomial_p_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_legendre_polynomial_p_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_legendre_polynomial_p_n_scalar_out::schema> create_special_legendre_polynomial_p_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_legendre_polynomial_p_n_scalar_out::name, special_legendre_polynomial_p_n_scalar_out::overload_name)
      .typed<special_legendre_polynomial_p_n_scalar_out::schema>();
}

// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_legendre_polynomial_p_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_legendre_polynomial_p_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_legendre_polynomial_p_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// --- special_modified_bessel_i0: generated dispatcher stubs ---
// create_*_typed_handle() resolves the operator schema (by name +
// overload name) from the c10::Dispatcher singleton; call() and
// redispatch() cache that handle in a function-local static and
// forward their arguments through it.
// aten::special_modified_bessel_i0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i0::schema> create_special_modified_bessel_i0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_i0::name, special_modified_bessel_i0::overload_name)
      .typed<special_modified_bessel_i0::schema>();
}

// aten::special_modified_bessel_i0(Tensor self) -> Tensor
at::Tensor special_modified_bessel_i0::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_modified_bessel_i0_typed_handle();
    return op.call(self);
}

// aten::special_modified_bessel_i0(Tensor self) -> Tensor
at::Tensor special_modified_bessel_i0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_modified_bessel_i0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i0_out::schema> create_special_modified_bessel_i0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_i0_out::name, special_modified_bessel_i0_out::overload_name)
      .typed<special_modified_bessel_i0_out::schema>();
}

// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_i0_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_i0_out_typed_handle();
    return op.call(self, out);
}

// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_i0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_i0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- special_modified_bessel_i1: generated dispatcher stubs ---
// create_*_typed_handle() resolves the operator schema (by name +
// overload name) from the c10::Dispatcher singleton; call() and
// redispatch() cache that handle in a function-local static and
// forward their arguments through it.
// aten::special_modified_bessel_i1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i1::schema> create_special_modified_bessel_i1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_i1::name, special_modified_bessel_i1::overload_name)
      .typed<special_modified_bessel_i1::schema>();
}

// aten::special_modified_bessel_i1(Tensor self) -> Tensor
at::Tensor special_modified_bessel_i1::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_modified_bessel_i1_typed_handle();
    return op.call(self);
}

// aten::special_modified_bessel_i1(Tensor self) -> Tensor
at::Tensor special_modified_bessel_i1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_modified_bessel_i1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_i1_out::schema> create_special_modified_bessel_i1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_i1_out::name, special_modified_bessel_i1_out::overload_name)
      .typed<special_modified_bessel_i1_out::schema>();
}

// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_i1_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_i1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_i1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_i1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- special_modified_bessel_k0: generated dispatcher stubs ---
// create_*_typed_handle() resolves the operator schema (by name +
// overload name) from the c10::Dispatcher singleton; call() and
// redispatch() cache that handle in a function-local static and
// forward their arguments through it.
// aten::special_modified_bessel_k0(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k0::schema> create_special_modified_bessel_k0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_k0::name, special_modified_bessel_k0::overload_name)
      .typed<special_modified_bessel_k0::schema>();
}

// aten::special_modified_bessel_k0(Tensor self) -> Tensor
at::Tensor special_modified_bessel_k0::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_modified_bessel_k0_typed_handle();
    return op.call(self);
}

// aten::special_modified_bessel_k0(Tensor self) -> Tensor
at::Tensor special_modified_bessel_k0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_modified_bessel_k0_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k0_out::schema> create_special_modified_bessel_k0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_k0_out::name, special_modified_bessel_k0_out::overload_name)
      .typed<special_modified_bessel_k0_out::schema>();
}

// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_k0_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_k0_out_typed_handle();
    return op.call(self, out);
}

// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_k0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_k0_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- special_modified_bessel_k1: generated dispatcher stubs ---
// create_*_typed_handle() resolves the operator schema (by name +
// overload name) from the c10::Dispatcher singleton; call() and
// redispatch() cache that handle in a function-local static and
// forward their arguments through it.
// aten::special_modified_bessel_k1(Tensor self) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k1::schema> create_special_modified_bessel_k1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_k1::name, special_modified_bessel_k1::overload_name)
      .typed<special_modified_bessel_k1::schema>();
}

// aten::special_modified_bessel_k1(Tensor self) -> Tensor
at::Tensor special_modified_bessel_k1::call(const at::Tensor & self) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_modified_bessel_k1_typed_handle();
    return op.call(self);
}

// aten::special_modified_bessel_k1(Tensor self) -> Tensor
at::Tensor special_modified_bessel_k1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_modified_bessel_k1_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_modified_bessel_k1_out::schema> create_special_modified_bessel_k1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_modified_bessel_k1_out::name, special_modified_bessel_k1_out::overload_name)
      .typed<special_modified_bessel_k1_out::schema>();
}

// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_k1_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_k1_out_typed_handle();
    return op.call(self, out);
}

// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_modified_bessel_k1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_special_modified_bessel_k1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// --- special_scaled_modified_bessel_k0: generated dispatcher stubs ---
// create_*_typed_handle() resolves the operator schema (by name +
// overload name) from the c10::Dispatcher singleton; call() and
// redispatch() cache that handle in a function-local static and
// forward their arguments through it.
// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k0::schema> create_special_scaled_modified_bessel_k0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_scaled_modified_bessel_k0::name, special_scaled_modified_bessel_k0::overload_name)
      .typed<special_scaled_modified_bessel_k0::schema>();
}

// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
at::Tensor special_scaled_modified_bessel_k0::call(const at::Tensor & x) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_scaled_modified_bessel_k0_typed_handle();
    return op.call(x);
}

// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
at::Tensor special_scaled_modified_bessel_k0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_scaled_modified_bessel_k0_typed_handle();
    return op.redispatch(dispatchKeySet, x);
}

// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k0_out::schema> create_special_scaled_modified_bessel_k0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_scaled_modified_bessel_k0_out::name, special_scaled_modified_bessel_k0_out::overload_name)
      .typed<special_scaled_modified_bessel_k0_out::schema>();
}

// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_scaled_modified_bessel_k0_out::call(const at::Tensor & x, at::Tensor & out) {
    
    static auto op = create_special_scaled_modified_bessel_k0_out_typed_handle();
    return op.call(x, out);
}

// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_scaled_modified_bessel_k0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
    
    static auto op = create_special_scaled_modified_bessel_k0_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, out);
}

// --- special_scaled_modified_bessel_k1: generated dispatcher stubs ---
// create_*_typed_handle() resolves the operator schema (by name +
// overload name) from the c10::Dispatcher singleton; call() and
// redispatch() cache that handle in a function-local static and
// forward their arguments through it.
// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k1::schema> create_special_scaled_modified_bessel_k1_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_scaled_modified_bessel_k1::name, special_scaled_modified_bessel_k1::overload_name)
      .typed<special_scaled_modified_bessel_k1::schema>();
}

// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
at::Tensor special_scaled_modified_bessel_k1::call(const at::Tensor & x) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_scaled_modified_bessel_k1_typed_handle();
    return op.call(x);
}

// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
at::Tensor special_scaled_modified_bessel_k1::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_scaled_modified_bessel_k1_typed_handle();
    return op.redispatch(dispatchKeySet, x);
}

// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_scaled_modified_bessel_k1_out::schema> create_special_scaled_modified_bessel_k1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_scaled_modified_bessel_k1_out::name, special_scaled_modified_bessel_k1_out::overload_name)
      .typed<special_scaled_modified_bessel_k1_out::schema>();
}

// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_scaled_modified_bessel_k1_out::call(const at::Tensor & x, at::Tensor & out) {
    
    static auto op = create_special_scaled_modified_bessel_k1_out_typed_handle();
    return op.call(x, out);
}

// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_scaled_modified_bessel_k1_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
    
    static auto op = create_special_scaled_modified_bessel_k1_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, out);
}

// --- special_shifted_chebyshev_polynomial_t: generated dispatcher stubs ---
// Each overload (base, x_scalar, n_scalar, out, x_scalar_out,
// n_scalar_out) is emitted as three pieces: a create_*_typed_handle()
// helper that looks the schema up (by operator name + overload name) in
// the c10::Dispatcher singleton, a call() that invokes the operator
// through the dispatcher, and a redispatch() that forwards an explicit
// DispatchKeySet. The handle is cached in a function-local static.
// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t::schema> create_special_shifted_chebyshev_polynomial_t_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t::name, special_shifted_chebyshev_polynomial_t::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_shifted_chebyshev_polynomial_t_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_shifted_chebyshev_polynomial_t_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_x_scalar::schema> create_special_shifted_chebyshev_polynomial_t_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_x_scalar::name, special_shifted_chebyshev_polynomial_t_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_n_scalar::schema> create_special_shifted_chebyshev_polynomial_t_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_n_scalar::name, special_shifted_chebyshev_polynomial_t_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_out::schema> create_special_shifted_chebyshev_polynomial_t_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_out::name, special_shifted_chebyshev_polynomial_t_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_t_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_x_scalar_out::name, special_shifted_chebyshev_polynomial_t_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_t_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_t_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_t_n_scalar_out::name, special_shifted_chebyshev_polynomial_t_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_t_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_t_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_t_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// --- special_shifted_chebyshev_polynomial_u: generated dispatcher stubs ---
// Each overload (base, x_scalar, n_scalar, out, x_scalar_out,
// n_scalar_out) is emitted as three pieces: a create_*_typed_handle()
// helper that looks the schema up (by operator name + overload name) in
// the c10::Dispatcher singleton, a call() that invokes the operator
// through the dispatcher, and a redispatch() that forwards an explicit
// DispatchKeySet. The handle is cached in a function-local static.
// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u::schema> create_special_shifted_chebyshev_polynomial_u_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u::name, special_shifted_chebyshev_polynomial_u::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once and cached (thread-safe C++11 static init).
    static auto op = create_special_shifted_chebyshev_polynomial_u_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached handle; forwards the caller-supplied dispatch key set.
    static auto op = create_special_shifted_chebyshev_polynomial_u_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_x_scalar::schema> create_special_shifted_chebyshev_polynomial_u_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_x_scalar::name, special_shifted_chebyshev_polynomial_u_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_n_scalar::schema> create_special_shifted_chebyshev_polynomial_u_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_n_scalar::name, special_shifted_chebyshev_polynomial_u_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_out::schema> create_special_shifted_chebyshev_polynomial_u_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_out::name, special_shifted_chebyshev_polynomial_u_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_u_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_x_scalar_out::name, special_shifted_chebyshev_polynomial_u_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_u_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_u_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_u_n_scalar_out::name, special_shifted_chebyshev_polynomial_u_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_u_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_u_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    
    static auto op = create_special_shifted_chebyshev_polynomial_u_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v::schema> create_special_shifted_chebyshev_polynomial_v_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v::name, special_shifted_chebyshev_polynomial_v::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_v::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_v_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_v::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_v_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_x_scalar::schema> create_special_shifted_chebyshev_polynomial_v_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_x_scalar::name, special_shifted_chebyshev_polynomial_v_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_n_scalar::schema> create_special_shifted_chebyshev_polynomial_v_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_n_scalar::name, special_shifted_chebyshev_polynomial_v_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_out::schema> create_special_shifted_chebyshev_polynomial_v_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_out::name, special_shifted_chebyshev_polynomial_v_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_v_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_v_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_v_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_x_scalar_out::name, special_shifted_chebyshev_polynomial_v_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_v_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_v_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_v_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_v_n_scalar_out::name, special_shifted_chebyshev_polynomial_v_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_v_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_v_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_v_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w::schema> create_special_shifted_chebyshev_polynomial_w_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w::name, special_shifted_chebyshev_polynomial_w::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w::call(const at::Tensor & x, const at::Tensor & n) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_w_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_w_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_x_scalar::schema> create_special_shifted_chebyshev_polynomial_w_x_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_x_scalar::name, special_shifted_chebyshev_polynomial_w_x_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_x_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar::call(const at::Scalar & x, const at::Tensor & n) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_n_scalar::schema> create_special_shifted_chebyshev_polynomial_w_n_scalar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_n_scalar::name, special_shifted_chebyshev_polynomial_w_n_scalar::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_n_scalar::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar::call(const at::Tensor & x, const at::Scalar & n) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.call(x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_typed_handle();
    return op.redispatch(dispatchKeySet, x, n);
}

// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_out::schema> create_special_shifted_chebyshev_polynomial_w_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_out::name, special_shifted_chebyshev_polynomial_w_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_out::call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_w_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_w_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_x_scalar_out::schema> create_special_shifted_chebyshev_polynomial_w_x_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_x_scalar_out::name, special_shifted_chebyshev_polynomial_w_x_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_x_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_x_scalar_out::call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_x_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_w_x_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_shifted_chebyshev_polynomial_w_n_scalar_out::schema> create_special_shifted_chebyshev_polynomial_w_n_scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_shifted_chebyshev_polynomial_w_n_scalar_out::name, special_shifted_chebyshev_polynomial_w_n_scalar_out::overload_name)
      .typed<special_shifted_chebyshev_polynomial_w_n_scalar_out::schema>();
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_n_scalar_out::call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.call(x, n, out);
}

// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_shifted_chebyshev_polynomial_w_n_scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_shifted_chebyshev_polynomial_w_n_scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, n, out);
}

// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_spherical_bessel_j0::schema> create_special_spherical_bessel_j0_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_spherical_bessel_j0::name, special_spherical_bessel_j0::overload_name)
      .typed<special_spherical_bessel_j0::schema>();
}

// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
at::Tensor special_spherical_bessel_j0::call(const at::Tensor & x) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_spherical_bessel_j0_typed_handle();
    return op.call(x);
}

// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
at::Tensor special_spherical_bessel_j0::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_spherical_bessel_j0_typed_handle();
    return op.redispatch(dispatchKeySet, x);
}

// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<special_spherical_bessel_j0_out::schema> create_special_spherical_bessel_j0_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(special_spherical_bessel_j0_out::name, special_spherical_bessel_j0_out::overload_name)
      .typed<special_spherical_bessel_j0_out::schema>();
}

// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_spherical_bessel_j0_out::call(const at::Tensor & x, at::Tensor & out) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create_special_spherical_bessel_j0_out_typed_handle();
    return op.call(x, out);
}

// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & special_spherical_bessel_j0_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create_special_spherical_bessel_j0_out_typed_handle();
    return op.redispatch(dispatchKeySet, x, out);
}

// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_foobar::schema> create__foobar_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foobar::name, _foobar::overload_name)
      .typed<_foobar::schema>();
}

// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
at::Tensor _foobar::call(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__foobar_typed_handle();
    return op.call(self, arg1, arg2, arg3);
}

// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
at::Tensor _foobar::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__foobar_typed_handle();
    return op.redispatch(dispatchKeySet, self, arg1, arg2, arg3);
}

// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_::schema> create__fused_adam__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adam_::name, _fused_adam_::overload_name)
      .typed<_fused_adam_::schema>();
}

// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam_::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__fused_adam__typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__fused_adam__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam__tensor_lr::schema> create__fused_adam__tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adam__tensor_lr::name, _fused_adam__tensor_lr::overload_name)
      .typed<_fused_adam__tensor_lr::schema>();
}

// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam__tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__fused_adam__tensor_lr_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adam__tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__fused_adam__tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw_::schema> create__fused_adamw__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adamw_::name, _fused_adamw_::overload_name)
      .typed<_fused_adamw_::schema>();
}

// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adamw_::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__fused_adamw__typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adamw_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__fused_adamw__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw__tensor_lr::schema> create__fused_adamw__tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adamw__tensor_lr::name, _fused_adamw__tensor_lr::overload_name)
      .typed<_fused_adamw__tensor_lr::schema>();
}

// aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adamw__tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__fused_adamw__tensor_lr_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_adamw__tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__fused_adamw__tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_::schema> create__fused_sgd__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_::name, _fused_sgd_::overload_name)
      .typed<_fused_sgd_::schema>();
}

// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd_::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__fused_sgd__typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__fused_sgd__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// One-time dispatcher schema lookup; C10_NOINLINE keeps this cold path out of the callers below.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd__tensor_lr::schema> create__fused_sgd__tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd__tensor_lr::name, _fused_sgd__tensor_lr::overload_name)
      .typed<_fused_sgd__tensor_lr::schema>();
}

// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd__tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe function-local static) and reused on every call.
    static auto op = create__fused_sgd__tensor_lr_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
void _fused_sgd__tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle; re-dispatches with the caller-provided DispatchKeySet.
    static auto op = create__fused_sgd__tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_adagrad_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adagrad_::schema> create__fused_adagrad__typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adagrad_::name, _fused_adagrad_::overload_name)
      .typed<_fused_adagrad_::schema>();
}

// aten::_fused_adagrad_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher.
void _fused_adagrad_::call(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_adagrad__typed_handle();
    return op.call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
}

// aten::_fused_adagrad_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
void _fused_adagrad_::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    
    static auto op = create__fused_adagrad__typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
}

// aten::_propagate_xla_data(Tensor input, Tensor output) -> ()
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_propagate_xla_data::schema> create__propagate_xla_data_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_propagate_xla_data::name, _propagate_xla_data::overload_name)
      .typed<_propagate_xla_data::schema>();
}

// aten::_propagate_xla_data(Tensor input, Tensor output) -> ()
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher.
void _propagate_xla_data::call(const at::Tensor & input, const at::Tensor & output) {
    
    static auto op = create__propagate_xla_data_typed_handle();
    return op.call(input, output);
}

// aten::_propagate_xla_data(Tensor input, Tensor output) -> ()
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
void _propagate_xla_data::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & output) {
    
    static auto op = create__propagate_xla_data_typed_handle();
    return op.redispatch(dispatchKeySet, input, output);
}

// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_new_zeros_with_same_feature_meta_out::schema> create__new_zeros_with_same_feature_meta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_new_zeros_with_same_feature_meta_out::name, _new_zeros_with_same_feature_meta_out::overload_name)
      .typed<_new_zeros_with_same_feature_meta_out::schema>();
}

// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _new_zeros_with_same_feature_meta_out::call(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
    
    static auto op = create__new_zeros_with_same_feature_meta_out_typed_handle();
    return op.call(self, other, self_num_batch_dims, out);
}

// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _new_zeros_with_same_feature_meta_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
    
    static auto op = create__new_zeros_with_same_feature_meta_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, self_num_batch_dims, out);
}

// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_ctc_loss_out::schema> create__cudnn_ctc_loss_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_ctc_loss_out::name, _cudnn_ctc_loss_out::overload_name)
      .typed<_cudnn_ctc_loss_out::schema>();
}

// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns references to the two out tensors.
::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__cudnn_ctc_loss_out_typed_handle();
    return op.call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}

// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__cudnn_ctc_loss_out_typed_handle();
    return op.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
}

// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_flatten_weight_out::schema> create__cudnn_rnn_flatten_weight_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_flatten_weight_out::name, _cudnn_rnn_flatten_weight_out::overload_name)
      .typed<_cudnn_rnn_flatten_weight_out::schema>();
}

// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _cudnn_rnn_flatten_weight_out::call(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
    
    static auto op = create__cudnn_rnn_flatten_weight_out_typed_handle();
    return op.call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}

// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _cudnn_rnn_flatten_weight_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
    
    static auto op = create__cudnn_rnn_flatten_weight_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
}

// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_out::schema> create__cudnn_rnn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_out::name, _cudnn_rnn_out::overload_name)
      .typed<_cudnn_rnn_out::schema>();
}

// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns references to the five out tensors.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    
    static auto op = create__cudnn_rnn_out_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    
    static auto op = create__cudnn_rnn_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_rnn_backward_out::schema> create__cudnn_rnn_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_rnn_backward_out::name, _cudnn_rnn_backward_out::overload_name)
      .typed<_cudnn_rnn_backward_out::schema>();
}

// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher (results are written into out0..out3).
void _cudnn_rnn_backward_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    
    static auto op = create__cudnn_rnn_backward_out_typed_handle();
    return op.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
void _cudnn_rnn_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    
    static auto op = create__cudnn_rnn_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_cudnn_init_dropout_state_out::schema> create__cudnn_init_dropout_state_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cudnn_init_dropout_state_out::name, _cudnn_init_dropout_state_out::overload_name)
      .typed<_cudnn_init_dropout_state_out::schema>();
}

// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _cudnn_init_dropout_state_out::call(double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
    
    static auto op = create__cudnn_init_dropout_state_out_typed_handle();
    return op.call(dropout, train, dropout_seed, out);
}

// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _cudnn_init_dropout_state_out::redispatch(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
    
    static auto op = create__cudnn_init_dropout_state_out_typed_handle();
    return op.redispatch(dispatchKeySet, dropout, train, dropout_seed, out);
}

// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_dropout_out::schema> create__fused_dropout_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_dropout_out::name, _fused_dropout_out::overload_name)
      .typed<_fused_dropout_out::schema>();
}

// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns references to the two out tensors.
::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__fused_dropout_out_typed_handle();
    return op.call(self, p, generator, out0, out1);
}

// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__fused_dropout_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out0, out1);
}

// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_masked_scale_out::schema> create__masked_scale_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_masked_scale_out::name, _masked_scale_out::overload_name)
      .typed<_masked_scale_out::schema>();
}

// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _masked_scale_out::call(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
    
    static auto op = create__masked_scale_out_typed_handle();
    return op.call(self, mask, scale, out);
}

// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _masked_scale_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
    
    static auto op = create__masked_scale_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, scale, out);
}

// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout_out::schema> create_native_dropout_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_dropout_out::name, native_dropout_out::overload_name)
      .typed<native_dropout_out::schema>();
}

// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns references to the two out tensors.
::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out::call(const at::Tensor & input, double p, ::std::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_native_dropout_out_typed_handle();
    return op.call(input, p, train, out0, out1);
}

// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_native_dropout_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, p, train, out0, out1);
}

// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<native_dropout_backward_out::schema> create_native_dropout_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_dropout_backward_out::name, native_dropout_backward_out::overload_name)
      .typed<native_dropout_backward_out::schema>();
}

// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & native_dropout_backward_out::call(const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {
    
    static auto op = create_native_dropout_backward_out_typed_handle();
    return op.call(grad_output, mask, scale, out);
}

// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & native_dropout_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {
    
    static auto op = create_native_dropout_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, mask, scale, out);
}

// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_conj_physical_out::schema> create__conj_physical_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_physical_out::name, _conj_physical_out::overload_name)
      .typed<_conj_physical_out::schema>();
}

// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _conj_physical_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__conj_physical_out_typed_handle();
    return op.call(self, out);
}

// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _conj_physical_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__conj_physical_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::avg_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<avg_pool1d_out::schema> create_avg_pool1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(avg_pool1d_out::name, avg_pool1d_out::overload_name)
      .typed<avg_pool1d_out::schema>();
}

// aten::avg_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & avg_pool1d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, at::Tensor & out) {
    
    static auto op = create_avg_pool1d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, out);
}

// aten::avg_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & avg_pool1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, at::Tensor & out) {
    
    static auto op = create_avg_pool1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, out);
}

// aten::adaptive_avg_pool1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<adaptive_avg_pool1d_out::schema> create_adaptive_avg_pool1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(adaptive_avg_pool1d_out::name, adaptive_avg_pool1d_out::overload_name)
      .typed<adaptive_avg_pool1d_out::schema>();
}

// aten::adaptive_avg_pool1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & adaptive_avg_pool1d_out::call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool1d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::adaptive_avg_pool1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & adaptive_avg_pool1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
    
    static auto op = create_adaptive_avg_pool1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_add_relu_Scalar_out::schema> create__add_relu_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_add_relu_Scalar_out::name, _add_relu_Scalar_out::overload_name)
      .typed<_add_relu_Scalar_out::schema>();
}

// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _add_relu_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create__add_relu_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _add_relu_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create__add_relu_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<add_Scalar_out::schema> create_add_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(add_Scalar_out::name, add_Scalar_out::overload_name)
      .typed<add_Scalar_out::schema>();
}

// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & add_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_add_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & add_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_add_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<affine_grid_generator_out::schema> create_affine_grid_generator_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(affine_grid_generator_out::name, affine_grid_generator_out::overload_name)
      .typed<affine_grid_generator_out::schema>();
}

// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & affine_grid_generator_out::call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out) {
    
    static auto op = create_affine_grid_generator_out_typed_handle();
    return op.call(theta, size, align_corners, out);
}

// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & affine_grid_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out) {
    
    static auto op = create_affine_grid_generator_out_typed_handle();
    return op.redispatch(dispatchKeySet, theta, size, align_corners, out);
}

// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_functorch_fallback_out::schema> create__test_functorch_fallback_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_test_functorch_fallback_out::name, _test_functorch_fallback_out::overload_name)
      .typed<_test_functorch_fallback_out::schema>();
}

// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & _test_functorch_fallback_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create__test_functorch_fallback_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & _test_functorch_fallback_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    
    static auto op = create__test_functorch_fallback_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema in the dispatcher (throws if unregistered) and
// returns a handle checked against the C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window_out::schema> create_bartlett_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bartlett_window_out::name, bartlett_window_out::overload_name)
      .typed<bartlett_window_out::schema>();
}

// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: caches the typed handle in a function-local static and forwards
// all arguments to the dispatcher; returns the (mutated) `out` reference.
at::Tensor & bartlett_window_out::call(int64_t window_length, at::Tensor & out) {
    
    static auto op = create_bartlett_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but passes an explicit DispatchKeySet so dispatch resumes from
// the given key set (re-dispatch from within a kernel).
at::Tensor & bartlett_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
    
    static auto op = create_bartlett_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}

// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<bartlett_window_periodic_out::schema> create_bartlett_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bartlett_window_periodic_out::name, bartlett_window_periodic_out::overload_name)
      .typed<bartlett_window_periodic_out::schema>();
}

// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & bartlett_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {
    
    static auto op = create_bartlett_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & bartlett_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
    
    static auto op = create_bartlett_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}

// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_batch_norm_out::schema> create_quantized_batch_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_batch_norm_out::name, quantized_batch_norm_out::overload_name)
      .typed<quantized_batch_norm_out::schema>();
}

// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & quantized_batch_norm_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {
    
    static auto op = create_quantized_batch_norm_out_typed_handle();
    return op.call(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
}

// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & quantized_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {
    
    static auto op = create_quantized_batch_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
}

// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_Tensor_out::schema> create_bernoulli_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bernoulli_Tensor_out::name, bernoulli_Tensor_out::overload_name)
      .typed<bernoulli_Tensor_out::schema>();
}

// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & bernoulli_Tensor_out::call(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_bernoulli_Tensor_out_typed_handle();
    return op.call(self, p, generator, out);
}

// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & bernoulli_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_bernoulli_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out);
}

// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_Tensor::schema> create_bernoulli_Tensor_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bernoulli_Tensor::name, bernoulli_Tensor::overload_name)
      .typed<bernoulli_Tensor::schema>();
}

// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor bernoulli_Tensor::call(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    
    static auto op = create_bernoulli_Tensor_typed_handle();
    return op.call(self, p, generator);
}

// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor bernoulli_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
    
    static auto op = create_bernoulli_Tensor_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<bernoulli_float_out::schema> create_bernoulli_float_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bernoulli_float_out::name, bernoulli_float_out::overload_name)
      .typed<bernoulli_float_out::schema>();
}

// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & bernoulli_float_out::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_bernoulli_float_out_typed_handle();
    return op.call(self, p, generator, out);
}

// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & bernoulli_float_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    
    static auto op = create_bernoulli_float_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out);
}

// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<binary_cross_entropy_with_logits_out::schema> create_binary_cross_entropy_with_logits_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(binary_cross_entropy_with_logits_out::name, binary_cross_entropy_with_logits_out::overload_name)
      .typed<binary_cross_entropy_with_logits_out::schema>();
}

// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & binary_cross_entropy_with_logits_out::call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
    
    static auto op = create_binary_cross_entropy_with_logits_out_typed_handle();
    return op.call(self, target, weight, pos_weight, reduction, out);
}

// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & binary_cross_entropy_with_logits_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
    
    static auto op = create_binary_cross_entropy_with_logits_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction, out);
}

// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<bincount_out::schema> create_bincount_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bincount_out::name, bincount_out::overload_name)
      .typed<bincount_out::schema>();
}

// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & bincount_out::call(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
    
    static auto op = create_bincount_out_typed_handle();
    return op.call(self, weights, minlength, out);
}

// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & bincount_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
    
    static auto op = create_bincount_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weights, minlength, out);
}

// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window_out::schema> create_blackman_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window_out::name, blackman_window_out::overload_name)
      .typed<blackman_window_out::schema>();
}

// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & blackman_window_out::call(int64_t window_length, at::Tensor & out) {
    
    static auto op = create_blackman_window_out_typed_handle();
    return op.call(window_length, out);
}

// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & blackman_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
    
    static auto op = create_blackman_window_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, out);
}

// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<blackman_window_periodic_out::schema> create_blackman_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(blackman_window_periodic_out::name, blackman_window_periodic_out::overload_name)
      .typed<blackman_window_periodic_out::schema>();
}

// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & blackman_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {
    
    static auto op = create_blackman_window_periodic_out_typed_handle();
    return op.call(window_length, periodic, out);
}

// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & blackman_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
    
    static auto op = create_blackman_window_periodic_out_typed_handle();
    return op.redispatch(dispatchKeySet, window_length, periodic, out);
}

// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<block_diag_out::schema> create_block_diag_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(block_diag_out::name, block_diag_out::overload_name)
      .typed<block_diag_out::schema>();
}

// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & block_diag_out::call(at::TensorList tensors, at::Tensor & out) {
    
    static auto op = create_block_diag_out_typed_handle();
    return op.call(tensors, out);
}

// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & block_diag_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
    
    static auto op = create_block_diag_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<constant_pad_nd_out::schema> create_constant_pad_nd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(constant_pad_nd_out::name, constant_pad_nd_out::overload_name)
      .typed<constant_pad_nd_out::schema>();
}

// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & constant_pad_nd_out::call(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_constant_pad_nd_out_typed_handle();
    return op.call(self, pad, value, out);
}

// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & constant_pad_nd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
    
    static auto op = create_constant_pad_nd_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, pad, value, out);
}

// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_out::schema> create_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_out::name, convolution_out::overload_name)
      .typed<convolution_out::schema>();
}

// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & convolution_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create_convolution_out_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}

// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}

// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward_out::schema> create_convolution_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_backward_out::name, convolution_backward_out::overload_name)
      .typed<convolution_backward_out::schema>();
}

// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Public entry point: dispatches through a handle cached in a function-local static.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_convolution_backward_out_typed_handle();
    return op.call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}

// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_convolution_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}

// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_overrideable_out::schema> create_convolution_overrideable_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_overrideable_out::name, convolution_overrideable_out::overload_name)
      .typed<convolution_overrideable_out::schema>();
}

// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & convolution_overrideable_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create_convolution_overrideable_out_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}

// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & convolution_overrideable_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create_convolution_overrideable_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
}

// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<convolution_backward_overrideable_out::schema> create_convolution_backward_overrideable_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(convolution_backward_overrideable_out::name, convolution_backward_overrideable_out::overload_name)
      .typed<convolution_backward_overrideable_out::schema>();
}

// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Public entry point: dispatches through a handle cached in a function-local static.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_convolution_backward_overrideable_out_typed_handle();
    return op.call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}

// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_convolution_backward_overrideable_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
}

// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_convolution_out::schema> create__convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_convolution_out::name, _convolution_out::overload_name)
      .typed<_convolution_out::schema>();
}

// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & _convolution_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
    
    static auto op = create__convolution_out_typed_handle();
    return op.call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}

// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & _convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
    
    static auto op = create__convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
}

// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<conv_tbc_out::schema> create_conv_tbc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(conv_tbc_out::name, conv_tbc_out::overload_name)
      .typed<conv_tbc_out::schema>();
}

// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & conv_tbc_out::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
    
    static auto op = create_conv_tbc_out_typed_handle();
    return op.call(self, weight, bias, pad, out);
}

// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & conv_tbc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
    
    static auto op = create_conv_tbc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, pad, out);
}

// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<copy_out::schema> create_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_out::name, copy_out::overload_name)
      .typed<copy_out::schema>();
}

// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & copy_out::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
    
    static auto op = create_copy_out_typed_handle();
    return op.call(self, src, non_blocking, out);
}

// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
    
    static auto op = create_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking, out);
}

// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from_out::schema> create__copy_from_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_copy_from_out::name, _copy_from_out::overload_name)
      .typed<_copy_from_out::schema>();
}

// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & _copy_from_out::call(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
    
    static auto op = create__copy_from_out_typed_handle();
    return op.call(self, dst, non_blocking, out);
}

// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & _copy_from_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
    
    static auto op = create__copy_from_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dst, non_blocking, out);
}

// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_copy_from_and_resize_out::schema> create__copy_from_and_resize_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_copy_from_and_resize_out::name, _copy_from_and_resize_out::overload_name)
      .typed<_copy_from_and_resize_out::schema>();
}

// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & _copy_from_and_resize_out::call(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
    
    static auto op = create__copy_from_and_resize_out_typed_handle();
    return op.call(self, dst, out);
}

// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & _copy_from_and_resize_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
    
    static auto op = create__copy_from_and_resize_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dst, out);
}

// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero_dim_IntList_out::schema> create_count_nonzero_dim_IntList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(count_nonzero_dim_IntList_out::name, count_nonzero_dim_IntList_out::overload_name)
      .typed<count_nonzero_dim_IntList_out::schema>();
}

// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & count_nonzero_dim_IntList_out::call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    
    static auto op = create_count_nonzero_dim_IntList_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & count_nonzero_dim_IntList_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    
    static auto op = create_count_nonzero_dim_IntList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's schema in the dispatcher singleton and returns a
// statically-typed handle; findSchemaOrThrow throws if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<count_nonzero_out::schema> create_count_nonzero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(count_nonzero_out::name, count_nonzero_out::overload_name)
      .typed<count_nonzero_out::schema>();
}

// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Public entry point: dispatches through a handle cached in a function-local static.
at::Tensor & count_nonzero_out::call(const at::Tensor & self, ::std::optional<int64_t> dim, at::Tensor & out) {
    
    static auto op = create_count_nonzero_out_typed_handle();
    return op.call(self, dim, out);
}

// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but forwards the caller-supplied DispatchKeySet to the dispatcher.
at::Tensor & count_nonzero_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, at::Tensor & out) {
    
    static auto op = create_count_nonzero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator_out::schema> create_cudnn_affine_grid_generator_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_affine_grid_generator_out::name, cudnn_affine_grid_generator_out::overload_name)
      .typed<cudnn_affine_grid_generator_out::schema>();
}

// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_out::call(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_affine_grid_generator_out_typed_handle();
    return typed_handle.call(theta, N, C, H, W, out);
}

// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
    static auto typed_handle = create_cudnn_affine_grid_generator_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, theta, N, C, H, W, out);
}

// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_affine_grid_generator_backward_out::schema> create_cudnn_affine_grid_generator_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_affine_grid_generator_backward_out::name, cudnn_affine_grid_generator_backward_out::overload_name)
      .typed<cudnn_affine_grid_generator_backward_out::schema>();
}

// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_backward_out::call(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_affine_grid_generator_backward_out_typed_handle();
    return typed_handle.call(grad, N, C, H, W, out);
}

// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_affine_grid_generator_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
    static auto typed_handle = create_cudnn_affine_grid_generator_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad, N, C, H, W, out);
}

// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm_out::schema> create_cudnn_batch_norm_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_batch_norm_out::name, cudnn_batch_norm_out::overload_name)
      .typed<cudnn_batch_norm_out::schema>();
}

// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_batch_norm_out_typed_handle();
    return typed_handle.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
}

// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    static auto typed_handle = create_cudnn_batch_norm_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
}

// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_batch_norm_backward_out::schema> create_cudnn_batch_norm_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_batch_norm_backward_out::name, cudnn_batch_norm_backward_out::overload_name)
      .typed<cudnn_batch_norm_backward_out::schema>();
}

// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_batch_norm_backward_out_typed_handle();
    return typed_handle.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}

// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    static auto typed_handle = create_cudnn_batch_norm_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
}

// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_transpose_out::schema> create_cudnn_convolution_transpose_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_convolution_transpose_out::name, cudnn_convolution_transpose_out::overload_name)
      .typed<cudnn_convolution_transpose_out::schema>();
}

// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_transpose_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_convolution_transpose_out_typed_handle();
    return typed_handle.call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
    static auto typed_handle = create_cudnn_convolution_transpose_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
}

// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution_transpose_out::schema> create__mps_convolution_transpose_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_mps_convolution_transpose_out::name, _mps_convolution_transpose_out::overload_name)
      .typed<_mps_convolution_transpose_out::schema>();
}

// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_convolution_transpose_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create__mps_convolution_transpose_out_typed_handle();
    return typed_handle.call(self, weight, padding, output_padding, stride, dilation, groups, out);
}

// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mps_convolution_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    static auto typed_handle = create__mps_convolution_transpose_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, out);
}

// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_transpose_backward_out::schema> create_mps_convolution_transpose_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mps_convolution_transpose_backward_out::name, mps_convolution_transpose_backward_out::overload_name)
      .typed<mps_convolution_transpose_backward_out::schema>();
}

// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_mps_convolution_transpose_backward_out_typed_handle();
    return typed_handle.call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}

// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    static auto typed_handle = create_mps_convolution_transpose_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
}

// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_relu_out::schema> create_cudnn_convolution_relu_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_convolution_relu_out::name, cudnn_convolution_relu_out::overload_name)
      .typed<cudnn_convolution_relu_out::schema>();
}

// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_relu_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_convolution_relu_out_typed_handle();
    return typed_handle.call(self, weight, bias, stride, padding, dilation, groups, out);
}

// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    static auto typed_handle = create_cudnn_convolution_relu_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups, out);
}

// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_convolution_add_relu_out::schema> create_cudnn_convolution_add_relu_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_convolution_add_relu_out::name, cudnn_convolution_add_relu_out::overload_name)
      .typed<cudnn_convolution_add_relu_out::schema>();
}

// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_add_relu_out::call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_convolution_add_relu_out_typed_handle();
    return typed_handle.call(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}

// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_convolution_add_relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    static auto typed_handle = create_cudnn_convolution_add_relu_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
}

// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler_out::schema> create_cudnn_grid_sampler_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_grid_sampler_out::name, cudnn_grid_sampler_out::overload_name)
      .typed<cudnn_grid_sampler_out::schema>();
}

// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_grid_sampler_out::call(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_grid_sampler_out_typed_handle();
    return typed_handle.call(self, grid, out);
}

// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cudnn_grid_sampler_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
    static auto typed_handle = create_cudnn_grid_sampler_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, grid, out);
}

// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<cudnn_grid_sampler_backward_out::schema> create_cudnn_grid_sampler_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(cudnn_grid_sampler_backward_out::name, cudnn_grid_sampler_backward_out::overload_name)
      .typed<cudnn_grid_sampler_backward_out::schema>();
}

// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out::call(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_cudnn_grid_sampler_backward_out_typed_handle();
    return typed_handle.call(self, grid, grad_output, out0, out1);
}

// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
    static auto typed_handle = create_cudnn_grid_sampler_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, grid, grad_output, out0, out1);
}

// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_out::schema> create__ctc_loss_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss_out::name, _ctc_loss_out::overload_name)
      .typed<_ctc_loss_out::schema>();
}

// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out::call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create__ctc_loss_out_typed_handle();
    return typed_handle.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    static auto typed_handle = create__ctc_loss_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_Tensor_out::schema> create__ctc_loss_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss_Tensor_out::name, _ctc_loss_Tensor_out::overload_name)
      .typed<_ctc_loss_Tensor_out::schema>();
}

// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_Tensor_out::call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create__ctc_loss_Tensor_out_typed_handle();
    return typed_handle.call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
    static auto typed_handle = create__ctc_loss_Tensor_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
}

// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_ctc_loss_backward_out::schema> create__ctc_loss_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_ctc_loss_backward_out::name, _ctc_loss_backward_out::overload_name)
      .typed<_ctc_loss_backward_out::schema>();
}

// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _ctc_loss_backward_out::call(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create__ctc_loss_backward_out_typed_handle();
    return typed_handle.call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}

// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _ctc_loss_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
    static auto typed_handle = create__ctc_loss_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
}

// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diag_embed_out::schema> create_diag_embed_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diag_embed_out::name, diag_embed_out::overload_name)
      .typed<diag_embed_out::schema>();
}

// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diag_embed_out::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_diag_embed_out_typed_handle();
    return typed_handle.call(self, offset, dim1, dim2, out);
}

// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diag_embed_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    static auto typed_handle = create_diag_embed_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
}

// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_backward_out::schema> create_diagonal_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(diagonal_backward_out::name, diagonal_backward_out::overload_name)
      .typed<diagonal_backward_out::schema>();
}

// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_backward_out::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_diagonal_backward_out_typed_handle();
    return typed_handle.call(grad_output, input_sizes, offset, dim1, dim2, out);
}

// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    static auto typed_handle = create_diagonal_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2, out);
}

// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar_out::schema> create_div_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_Scalar_out::name, div_Scalar_out::overload_name)
      .typed<div_Scalar_out::schema>();
}

// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_div_Scalar_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto typed_handle = create_div_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<div_Scalar_mode_out::schema> create_div_Scalar_mode_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(div_Scalar_mode_out::name, div_Scalar_mode_out::overload_name)
      .typed<div_Scalar_mode_out::schema>();
}

// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_mode_out::call(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    // Handle lookup happens once; subsequent calls reuse the cached typed handle.
    static auto typed_handle = create_div_Scalar_mode_out_typed_handle();
    return typed_handle.call(self, other, rounding_mode, out);
}

// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
at::Tensor & div_Scalar_mode_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
    static auto typed_handle = create_div_Scalar_mode_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, rounding_mode, out);
}

// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_out::schema> create_embedding_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_out::name, embedding_out::overload_name)
      .typed<embedding_out::schema>();
}

// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_out::call(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_embedding_out_typed_handle();
    return op.call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}

// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_embedding_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
}

// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_dense_backward_out::schema> create_embedding_dense_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_dense_backward_out::name, embedding_dense_backward_out::overload_name)
      .typed<embedding_dense_backward_out::schema>();
}

// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_dense_backward_out::call(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_embedding_dense_backward_out_typed_handle();
    return op.call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}

// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_dense_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_embedding_dense_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
}

// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_renorm_out::schema> create_embedding_renorm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_renorm_out::name, embedding_renorm_out::overload_name)
      .typed<embedding_renorm_out::schema>();
}

// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_renorm_out::call(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_embedding_renorm_out_typed_handle();
    return op.call(self, indices, max_norm, norm_type, out);
}

// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & embedding_renorm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_embedding_renorm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, max_norm, norm_type, out);
}

// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<embedding_renorm::schema> create_embedding_renorm_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(embedding_renorm::name, embedding_renorm::overload_name)
      .typed<embedding_renorm::schema>();
}

// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
at::Tensor embedding_renorm::call(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_embedding_renorm_typed_handle();
    return op.call(self, indices, max_norm, norm_type);
}

// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
at::Tensor embedding_renorm::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_embedding_renorm_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, max_norm, norm_type);
}

// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_forward_only_out::schema> create__embedding_bag_forward_only_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_forward_only_out::name, _embedding_bag_forward_only_out::overload_name)
      .typed<_embedding_bag_forward_only_out::schema>();
}

// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__embedding_bag_forward_only_out_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__embedding_bag_forward_only_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_out::schema> create__embedding_bag_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_out::name, _embedding_bag_out::overload_name)
      .typed<_embedding_bag_out::schema>();
}

// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out::call(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__embedding_bag_out_typed_handle();
    return op.call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__embedding_bag_out_typed_handle();
    return op.redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
}

// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_dense_backward_out::schema> create__embedding_bag_dense_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_dense_backward_out::name, _embedding_bag_dense_backward_out::overload_name)
      .typed<_embedding_bag_dense_backward_out::schema>();
}

// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_dense_backward_out::call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__embedding_bag_dense_backward_out_typed_handle();
    return op.call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}

// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_dense_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__embedding_bag_dense_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
}

// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_embedding_bag_per_sample_weights_backward_out::schema> create__embedding_bag_per_sample_weights_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_embedding_bag_per_sample_weights_backward_out::name, _embedding_bag_per_sample_weights_backward_out::overload_name)
      .typed<_embedding_bag_per_sample_weights_backward_out::schema>();
}

// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_per_sample_weights_backward_out::call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__embedding_bag_per_sample_weights_backward_out_typed_handle();
    return op.call(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}

// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _embedding_bag_per_sample_weights_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__embedding_bag_per_sample_weights_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
}

// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<empty_names_out::schema> create_empty_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_names_out::name, empty_names_out::overload_name)
      .typed<empty_names_out::schema>();
}

// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_names_out::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_empty_names_out_typed_handle();
    return op.call(size, names, memory_format, out);
}

// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_empty_names_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, names, memory_format, out);
}

// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<empty_permuted_out::schema> create_empty_permuted_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_permuted_out::name, empty_permuted_out::overload_name)
      .typed<empty_permuted_out::schema>();
}

// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_permuted_out::call(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_empty_permuted_out_typed_handle();
    return op.call(size, physical_layout, out);
}

// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_permuted_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_empty_permuted_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, physical_layout, out);
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<new_empty_out::schema> create_new_empty_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty_out::name, new_empty_out::overload_name)
      .typed<new_empty_out::schema>();
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_new_empty_out_typed_handle();
    return op.call(self, size, out);
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_new_empty_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}

// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<new_empty_strided_out::schema> create_new_empty_strided_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_empty_strided_out::name, new_empty_strided_out::overload_name)
      .typed<new_empty_strided_out::schema>();
}

// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_strided_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_new_empty_strided_out_typed_handle();
    return op.call(self, size, stride, out);
}

// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_empty_strided_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_new_empty_strided_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, stride, out);
}

// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<new_full_out::schema> create_new_full_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_full_out::name, new_full_out::overload_name)
      .typed<new_full_out::schema>();
}

// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_full_out::call(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_new_full_out_typed_handle();
    return op.call(self, size, fill_value, out);
}

// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_full_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_new_full_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, fill_value, out);
}

// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<new_zeros_out::schema> create_new_zeros_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_zeros_out::name, new_zeros_out::overload_name)
      .typed<new_zeros_out::schema>();
}

// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_zeros_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_new_zeros_out_typed_handle();
    return op.call(self, size, out);
}

// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_zeros_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_new_zeros_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}

// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<new_ones_out::schema> create_new_ones_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(new_ones_out::name, new_ones_out::overload_name)
      .typed<new_ones_out::schema>();
}

// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_ones_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_new_ones_out_typed_handle();
    return op.call(self, size, out);
}

// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & new_ones_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_new_ones_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, out);
}

// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_empty_affine_quantized_out::schema> create__empty_affine_quantized_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_affine_quantized_out::name, _empty_affine_quantized_out::overload_name)
      .typed<_empty_affine_quantized_out::schema>();
}

// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_affine_quantized_out::call(c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__empty_affine_quantized_out_typed_handle();
    return op.call(size, scale, zero_point, memory_format, out);
}

// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_affine_quantized_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__empty_affine_quantized_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, scale, zero_point, memory_format, out);
}

// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_empty_per_channel_affine_quantized_out::schema> create__empty_per_channel_affine_quantized_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_empty_per_channel_affine_quantized_out::name, _empty_per_channel_affine_quantized_out::overload_name)
      .typed<_empty_per_channel_affine_quantized_out::schema>();
}

// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_per_channel_affine_quantized_out::call(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__empty_per_channel_affine_quantized_out_typed_handle();
    return op.call(size, scales, zero_points, axis, memory_format, out);
}

// aten::_empty_per_channel_affine_quantized.out(SymInt[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _empty_per_channel_affine_quantized_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__empty_per_channel_affine_quantized_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, scales, zero_points, axis, memory_format, out);
}

// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<resize_out::schema> create_resize_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_out::name, resize_out::overload_name)
      .typed<resize_out::schema>();
}

// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// NOTE: `out` is `const Tensor &` here (unlike most out-variants) per this op's generated schema binding.
const at::Tensor & resize_out::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_resize_out_typed_handle();
    return op.call(self, size, memory_format, out);
}

// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & resize_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_resize_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, memory_format, out);
}

// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<resize::schema> create_resize_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize::name, resize::overload_name)
      .typed<resize::schema>();
}

// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor resize::call(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_resize_typed_handle();
    return op.call(self, size, memory_format);
}

// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
at::Tensor resize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_resize_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, memory_format);
}

// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_resize_output_out::schema> create__resize_output_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_resize_output_out::name, _resize_output_out::overload_name)
      .typed<_resize_output_out::schema>();
}

// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
// NOTE: `out` is `const Tensor &` here (unlike most out-variants) per this op's generated schema binding.
const at::Tensor & _resize_output_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__resize_output_out_typed_handle();
    return op.call(self, size, device, out);
}

// aten::_resize_output.out(Tensor self, SymInt[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
const at::Tensor & _resize_output_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__resize_output_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, device, out);
}

// aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_resize_output::schema> create__resize_output_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_resize_output::name, _resize_output::overload_name)
      .typed<_resize_output::schema>();
}

// aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor
at::Tensor _resize_output::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create__resize_output_typed_handle();
    return op.call(self, size, device);
}

// aten::_resize_output(Tensor self, SymInt[] size, Device device) -> Tensor
at::Tensor _resize_output::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create__resize_output_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, device);
}

// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's typed handle from the global dispatcher; findSchemaOrThrow raises if the schema is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<empty_quantized_out::schema> create_empty_quantized_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_quantized_out::name, empty_quantized_out::overload_name)
      .typed<empty_quantized_out::schema>();
}

// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_quantized_out::call(at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Function-local static: the dispatcher lookup happens once, thread-safely, on first use.
    static auto op = create_empty_quantized_out_typed_handle();
    return op.call(size, qtensor, memory_format, out);
}

// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_quantized_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enters the dispatcher using the explicit dispatchKeySet supplied by the caller.
    static auto op = create_empty_quantized_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, qtensor, memory_format, out);
}

// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<empty_like_out::schema> create_empty_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_like_out::name, empty_like_out::overload_name)
      .typed<empty_like_out::schema>();
}

// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_empty_like_out_typed_handle();
    return handle.call(self, memory_format, out);
}

// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_empty_like_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<empty_strided_out::schema> create_empty_strided_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(empty_strided_out::name, empty_strided_out::overload_name)
      .typed<empty_strided_out::schema>();
}

// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_strided_out::call(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_empty_strided_out_typed_handle();
    return handle.call(size, stride, out);
}

// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & empty_strided_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_empty_strided_out_typed_handle();
    return handle.redispatch(dispatchKeySet, size, stride, out);
}

// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<fill_Scalar_out::schema> create_fill_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_Scalar_out::name, fill_Scalar_out::overload_name)
      .typed<fill_Scalar_out::schema>();
}

// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fill_Scalar_out::call(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_fill_Scalar_out_typed_handle();
    return handle.call(self, value, out);
}

// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fill_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_fill_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, value, out);
}

// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<fill_Tensor_out::schema> create_fill_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(fill_Tensor_out::name, fill_Tensor_out::overload_name)
      .typed<fill_Tensor_out::schema>();
}

// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fill_Tensor_out::call(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_fill_Tensor_out_typed_handle();
    return handle.call(self, value, out);
}

// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & fill_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_fill_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, value, out);
}

// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<floor_divide_Scalar_out::schema> create_floor_divide_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(floor_divide_Scalar_out::name, floor_divide_Scalar_out::overload_name)
      .typed<floor_divide_Scalar_out::schema>();
}

// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_divide_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_floor_divide_Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & floor_divide_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_floor_divide_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<full_names_out::schema> create_full_names_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_names_out::name, full_names_out::overload_name)
      .typed<full_names_out::schema>();
}

// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_names_out::call(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_full_names_out_typed_handle();
    return handle.call(size, fill_value, names, out);
}

// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_full_names_out_typed_handle();
    return handle.redispatch(dispatchKeySet, size, fill_value, names, out);
}

// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<full_like_out::schema> create_full_like_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(full_like_out::name, full_like_out::overload_name)
      .typed<full_like_out::schema>();
}

// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_like_out::call(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_full_like_out_typed_handle();
    return handle.call(self, fill_value, memory_format, out);
}

// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & full_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_full_like_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, fill_value, memory_format, out);
}

// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<from_file_out::schema> create_from_file_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(from_file_out::name, from_file_out::overload_name)
      .typed<from_file_out::schema>();
}

// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & from_file_out::call(c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_from_file_out_typed_handle();
    return handle.call(filename, shared, size, out);
}

// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & from_file_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_from_file_out_typed_handle();
    return handle.redispatch(dispatchKeySet, filename, shared, size, out);
}

// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d_out::schema> create_grid_sampler_2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_2d_out::name, grid_sampler_2d_out::overload_name)
      .typed<grid_sampler_2d_out::schema>();
}

// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_2d_out::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_grid_sampler_2d_out_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_grid_sampler_2d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_2d_backward_out::schema> create_grid_sampler_2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_2d_backward_out::name, grid_sampler_2d_backward_out::overload_name)
      .typed<grid_sampler_2d_backward_out::schema>();
}

// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_grid_sampler_2d_backward_out_typed_handle();
    return handle.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}

// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_grid_sampler_2d_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}

// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<_grid_sampler_2d_cpu_fallback_out::schema> create__grid_sampler_2d_cpu_fallback_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_grid_sampler_2d_cpu_fallback_out::name, _grid_sampler_2d_cpu_fallback_out::overload_name)
      .typed<_grid_sampler_2d_cpu_fallback_out::schema>();
}

// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _grid_sampler_2d_cpu_fallback_out::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create__grid_sampler_2d_cpu_fallback_out_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _grid_sampler_2d_cpu_fallback_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create__grid_sampler_2d_cpu_fallback_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d_out::schema> create_grid_sampler_3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_3d_out::name, grid_sampler_3d_out::overload_name)
      .typed<grid_sampler_3d_out::schema>();
}

// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_3d_out::call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_grid_sampler_3d_out_typed_handle();
    return handle.call(input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & grid_sampler_3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_grid_sampler_3d_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
}

// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<grid_sampler_3d_backward_out::schema> create_grid_sampler_3d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(grid_sampler_3d_backward_out::name, grid_sampler_3d_backward_out::overload_name)
      .typed<grid_sampler_3d_backward_out::schema>();
}

// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_grid_sampler_3d_backward_out_typed_handle();
    return handle.call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}

// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_grid_sampler_3d_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
}

// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<hann_window_out::schema> create_hann_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hann_window_out::name, hann_window_out::overload_name)
      .typed<hann_window_out::schema>();
}

// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_out::call(int64_t window_length, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_hann_window_out_typed_handle();
    return handle.call(window_length, out);
}

// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_hann_window_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, out);
}

// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<hann_window_periodic_out::schema> create_hann_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hann_window_periodic_out::name, hann_window_periodic_out::overload_name)
      .typed<hann_window_periodic_out::schema>();
}

// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_hann_window_periodic_out_typed_handle();
    return handle.call(window_length, periodic, out);
}

// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hann_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_hann_window_periodic_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, out);
}

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_out::schema> create_hamming_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_out::name, hamming_window_out::overload_name)
      .typed<hamming_window_out::schema>();
}

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_out::call(int64_t window_length, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_hamming_window_out_typed_handle();
    return handle.call(window_length, out);
}

// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_hamming_window_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, out);
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_out::schema> create_hamming_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_out::name, hamming_window_periodic_out::overload_name)
      .typed<hamming_window_periodic_out::schema>();
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_hamming_window_periodic_out_typed_handle();
    return handle.call(window_length, periodic, out);
}

// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_hamming_window_periodic_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, out);
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha_out::schema> create_hamming_window_periodic_alpha_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_alpha_out::name, hamming_window_periodic_alpha_out::overload_name)
      .typed<hamming_window_periodic_alpha_out::schema>();
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_out::call(int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_hamming_window_periodic_alpha_out_typed_handle();
    return handle.call(window_length, periodic, alpha, out);
}

// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_hamming_window_periodic_alpha_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, alpha, out);
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<hamming_window_periodic_alpha_beta_out::schema> create_hamming_window_periodic_alpha_beta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hamming_window_periodic_alpha_beta_out::name, hamming_window_periodic_alpha_beta_out::overload_name)
      .typed<hamming_window_periodic_alpha_beta_out::schema>();
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_beta_out::call(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_hamming_window_periodic_alpha_beta_out_typed_handle();
    return handle.call(window_length, periodic, alpha, beta, out);
}

// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hamming_window_periodic_alpha_beta_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_hamming_window_periodic_alpha_beta_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, alpha, beta, out);
}

// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_out::schema> create_kaiser_window_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_out::name, kaiser_window_out::overload_name)
      .typed<kaiser_window_out::schema>();
}

// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_out::call(int64_t window_length, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_kaiser_window_out_typed_handle();
    return handle.call(window_length, out);
}

// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_kaiser_window_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, out);
}

// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_periodic_out::schema> create_kaiser_window_periodic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_periodic_out::name, kaiser_window_periodic_out::overload_name)
      .typed<kaiser_window_periodic_out::schema>();
}

// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_periodic_out::call(int64_t window_length, bool periodic, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_kaiser_window_periodic_out_typed_handle();
    return handle.call(window_length, periodic, out);
}

// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_periodic_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_kaiser_window_periodic_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, out);
}

// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<kaiser_window_beta_out::schema> create_kaiser_window_beta_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(kaiser_window_beta_out::name, kaiser_window_beta_out::overload_name)
      .typed<kaiser_window_beta_out::schema>();
}

// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_beta_out::call(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_kaiser_window_beta_out_typed_handle();
    return handle.call(window_length, periodic, beta, out);
}

// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & kaiser_window_beta_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_kaiser_window_beta_out_typed_handle();
    return handle.redispatch(dispatchKeySet, window_length, periodic, beta, out);
}

// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm_out::schema> create_native_group_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_group_norm_out::name, native_group_norm_out::overload_name)
      .typed<native_group_norm_out::schema>();
}

// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_native_group_norm_out_typed_handle();
    return handle.call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}

// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_native_group_norm_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
}

// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Looks up the operator's typed handle in the global dispatcher (throws if unregistered).
static C10_NOINLINE c10::TypedOperatorHandle<native_group_norm_backward_out::schema> create_native_group_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_group_norm_backward_out::name, native_group_norm_backward_out::overload_name)
      .typed<native_group_norm_backward_out::schema>();
}

// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Resolve the dispatcher handle once on first use; cached for process lifetime.
    static auto handle = create_native_group_norm_backward_out_typed_handle();
    return handle.call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}

// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Forward to the dispatcher with an explicit key set (skips keys already handled).
    static auto handle = create_native_group_norm_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
}

// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<index_put_out::schema> create_index_put_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(index_put_out::name, index_put_out::overload_name)
      .typed<index_put_out::schema>();
}

// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_put_out::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_index_put_out_typed_handle();
    return op.call(self, indices, values, accumulate, out);
}

// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & index_put_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_index_put_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, out);
}

// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<_index_put_impl_out::schema> create__index_put_impl_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_index_put_impl_out::name, _index_put_impl_out::overload_name)
      .typed<_index_put_impl_out::schema>();
}

// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _index_put_impl_out::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create__index_put_impl_out_typed_handle();
    return op.call(self, indices, values, accumulate, unsafe, out);
}

// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _index_put_impl_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create__index_put_impl_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe, out);
}

// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<_index_put_impl::schema> create__index_put_impl_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_index_put_impl::name, _index_put_impl::overload_name)
      .typed<_index_put_impl::schema>();
}

// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
at::Tensor _index_put_impl::call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create__index_put_impl_typed_handle();
    return op.call(self, indices, values, accumulate, unsafe);
}

// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
at::Tensor _index_put_impl::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create__index_put_impl_typed_handle();
    return op.redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
}

// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<isnan_out::schema> create_isnan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(isnan_out::name, isnan_out::overload_name)
      .typed<isnan_out::schema>();
}

// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isnan_out::call(const at::Tensor & self, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_isnan_out_typed_handle();
    return op.call(self, out);
}

// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isnan_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_isnan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm_out::schema> create_native_layer_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_layer_norm_out::name, native_layer_norm_out::overload_name)
      .typed<native_layer_norm_out::schema>();
}

// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out::call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_native_layer_norm_out_typed_handle();
    return op.call(input, normalized_shape, weight, bias, eps, out0, out1, out2);
}

// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_native_layer_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, out0, out1, out2);
}

// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<native_layer_norm_backward_out::schema> create_native_layer_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(native_layer_norm_backward_out::name, native_layer_norm_backward_out::overload_name)
      .typed<native_layer_norm_backward_out::schema>();
}

// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out::call(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_native_layer_norm_backward_out_typed_handle();
    return op.call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}

// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_native_layer_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
}

// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<linear_backward_out::schema> create_linear_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(linear_backward_out::name, linear_backward_out::overload_name)
      .typed<linear_backward_out::schema>();
}

// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_linear_backward_out_typed_handle();
    return op.call(self, grad_output, weight, output_mask, out0, out1, out2);
}

// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_linear_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
}

// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_out::schema> create_mkldnn_linear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_out::name, mkldnn_linear_out::overload_name)
      .typed<mkldnn_linear_out::schema>();
}

// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_linear_out_typed_handle();
    return op.call(self, weight, bias, out);
}

// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_linear_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, out);
}

// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_input_out::schema> create_mkldnn_linear_backward_input_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_input_out::name, mkldnn_linear_backward_input_out::overload_name)
      .typed<mkldnn_linear_backward_input_out::schema>();
}

// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_backward_input_out::call(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_linear_backward_input_out_typed_handle();
    return op.call(input_size, grad_output, weight, out);
}

// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_linear_backward_input_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_linear_backward_input_out_typed_handle();
    return op.redispatch(dispatchKeySet, input_size, grad_output, weight, out);
}

// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_weights_out::schema> create_mkldnn_linear_backward_weights_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_weights_out::name, mkldnn_linear_backward_weights_out::overload_name)
      .typed<mkldnn_linear_backward_weights_out::schema>();
}

// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out::call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_linear_backward_weights_out_typed_handle();
    return op.call(grad_output, input, weight, bias_defined, out0, out1);
}

// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_linear_backward_weights_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, input, weight, bias_defined, out0, out1);
}

// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_linear_backward_out::schema> create_mkldnn_linear_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_linear_backward_out::name, mkldnn_linear_backward_out::overload_name)
      .typed<mkldnn_linear_backward_out::schema>();
}

// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_linear_backward_out_typed_handle();
    return op.call(self, grad_output, weight, output_mask, out0, out1, out2);
}

// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_linear_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
}

// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<matmul_backward_out::schema> create_matmul_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(matmul_backward_out::name, matmul_backward_out::overload_name)
      .typed<matmul_backward_out::schema>();
}

// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out::call(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_matmul_backward_out_typed_handle();
    return op.call(grad, self, other, mask, out0, out1);
}

// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_matmul_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad, self, other, mask, out0, out1);
}

// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax_out::schema> create__aminmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax_out::name, _aminmax_out::overload_name)
      .typed<_aminmax_out::schema>();
}

// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out::call(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create__aminmax_out_typed_handle();
    return op.call(self, out0, out1);
}

// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create__aminmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out0, out1);
}

// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<_aminmax_dim_out::schema> create__aminmax_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_aminmax_dim_out::name, _aminmax_dim_out::overload_name)
      .typed<_aminmax_dim_out::schema>();
}

// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_dim_out::call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create__aminmax_dim_out_typed_handle();
    return op.call(self, dim, keepdim, out0, out1);
}

// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _aminmax_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create__aminmax_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, keepdim, out0, out1);
}

// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<max_pool2d_backward_out::schema> create_max_pool2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(max_pool2d_backward_out::name, max_pool2d_backward_out::overload_name)
      .typed<max_pool2d_backward_out::schema>();
}

// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_max_pool2d_backward_out_typed_handle();
    return op.call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & max_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_max_pool2d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d_out::schema> create_mkldnn_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d_out::name, mkldnn_max_pool2d_out::overload_name)
      .typed<mkldnn_max_pool2d_out::schema>();
}

// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_max_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool2d_backward_out::schema> create_mkldnn_max_pool2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool2d_backward_out::name, mkldnn_max_pool2d_backward_out::overload_name)
      .typed<mkldnn_max_pool2d_backward_out::schema>();
}

// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_max_pool2d_backward_out_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_max_pool2d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d_out::schema> create_mkldnn_max_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d_out::name, mkldnn_max_pool3d_out::overload_name)
      .typed<mkldnn_max_pool3d_out::schema>();
}

// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_max_pool3d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_max_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Builds a TypedOperatorHandle by looking up this op's schema in the global dispatcher singleton (findSchemaOrThrow throws if it was never registered).
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_max_pool3d_backward_out::schema> create_mkldnn_max_pool3d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_max_pool3d_backward_out::name, mkldnn_max_pool3d_backward_out::overload_name)
      .typed<mkldnn_max_pool3d_backward_out::schema>();
}

// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // The typed handle is resolved once and cached in a function-local static.
    static auto op = create_mkldnn_max_pool3d_backward_out_typed_handle();
    return op.call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_max_pool3d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    // Same as call(), but forwards an explicit DispatchKeySet supplied by the caller.
    static auto op = create_mkldnn_max_pool3d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool1d_out::schema> create_quantized_max_pool1d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool1d_out::name, quantized_max_pool1d_out::overload_name)
      .typed<quantized_max_pool1d_out::schema>();
}

// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & quantized_max_pool1d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_quantized_max_pool1d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & quantized_max_pool1d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_quantized_max_pool1d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool2d_out::schema> create_quantized_max_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool2d_out::name, quantized_max_pool2d_out::overload_name)
      .typed<quantized_max_pool2d_out::schema>();
}

// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & quantized_max_pool2d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_quantized_max_pool2d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & quantized_max_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_quantized_max_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<quantized_max_pool3d_out::schema> create_quantized_max_pool3d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantized_max_pool3d_out::name, quantized_max_pool3d_out::overload_name)
      .typed<quantized_max_pool3d_out::schema>();
}

// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & quantized_max_pool3d_out::call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_quantized_max_pool3d_out_typed_handle();
    return op.call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & quantized_max_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
    
    static auto op = create_quantized_max_pool3d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
}

// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<median_out::schema> create_median_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(median_out::name, median_out::overload_name)
      .typed<median_out::schema>();
}

// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & median_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_median_out_typed_handle();
    return op.call(self, out);
}

// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & median_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_median_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<nanmedian_out::schema> create_nanmedian_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(nanmedian_out::name, nanmedian_out::overload_name)
      .typed<nanmedian_out::schema>();
}

// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & nanmedian_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_nanmedian_out_typed_handle();
    return op.call(self, out);
}

// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & nanmedian_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_nanmedian_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<_mps_convolution_out::schema> create__mps_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mps_convolution_out::name, _mps_convolution_out::overload_name)
      .typed<_mps_convolution_out::schema>();
}

// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & _mps_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create__mps_convolution_out_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, out);
}

// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & _mps_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create__mps_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
}

// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<mps_convolution_backward_out::schema> create_mps_convolution_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mps_convolution_backward_out::name, mps_convolution_backward_out::overload_name)
      .typed<mps_convolution_backward_out::schema>();
}

// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out::call(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_mps_convolution_backward_out_typed_handle();
    return op.call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}

// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_mps_convolution_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
}

// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_convolution_out::schema> create_mkldnn_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_convolution_out::name, mkldnn_convolution_out::overload_name)
      .typed<mkldnn_convolution_out::schema>();
}

// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & mkldnn_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create_mkldnn_convolution_out_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, out);
}

// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & mkldnn_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
    
    static auto op = create_mkldnn_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
}

// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer_out::schema> create_mkldnn_rnn_layer_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_rnn_layer_out::name, mkldnn_rnn_layer_out::overload_name)
      .typed<mkldnn_rnn_layer_out::schema>();
}

// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out::call(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    
    static auto op = create_mkldnn_rnn_layer_out_typed_handle();
    return op.call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}

// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    
    static auto op = create_mkldnn_rnn_layer_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
}

// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_rnn_layer_backward_out::schema> create_mkldnn_rnn_layer_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_rnn_layer_backward_out::name, mkldnn_rnn_layer_backward_out::overload_name)
      .typed<mkldnn_rnn_layer_backward_out::schema>();
}

// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out::call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
    
    static auto op = create_mkldnn_rnn_layer_backward_out_typed_handle();
    return op.call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
}

// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
    
    static auto op = create_mkldnn_rnn_layer_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
}

// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm_out::schema> create_miopen_batch_norm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_batch_norm_out::name, miopen_batch_norm_out::overload_name)
      .typed<miopen_batch_norm_out::schema>();
}

// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_miopen_batch_norm_out_typed_handle();
    return op.call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}

// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_miopen_batch_norm_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
}

// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_batch_norm_backward_out::schema> create_miopen_batch_norm_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_batch_norm_backward_out::name, miopen_batch_norm_backward_out::overload_name)
      .typed<miopen_batch_norm_backward_out::schema>();
}

// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out::call(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_miopen_batch_norm_backward_out_typed_handle();
    return op.call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}

// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_miopen_batch_norm_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
}

// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_out::schema> create_miopen_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution_out::name, miopen_convolution_out::overload_name)
      .typed<miopen_convolution_out::schema>();
}

// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & miopen_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_convolution_out_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & miopen_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_convolution_transpose_out::schema> create_miopen_convolution_transpose_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_convolution_transpose_out::name, miopen_convolution_transpose_out::overload_name)
      .typed<miopen_convolution_transpose_out::schema>();
}

// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & miopen_convolution_transpose_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_convolution_transpose_out_typed_handle();
    return op.call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & miopen_convolution_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_convolution_transpose_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_depthwise_convolution_out::schema> create_miopen_depthwise_convolution_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_depthwise_convolution_out::name, miopen_depthwise_convolution_out::overload_name)
      .typed<miopen_depthwise_convolution_out::schema>();
}

// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Unboxed call path: the typed handle is resolved once (function-local static)
// and all arguments are forwarded to the dispatcher verbatim.
at::Tensor & miopen_depthwise_convolution_out::call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_depthwise_convolution_out_typed_handle();
    return op.call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
// Same as call(), except the caller-supplied dispatchKeySet is passed through
// to op.redispatch() instead of being recomputed from the arguments.
at::Tensor & miopen_depthwise_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
    
    static auto op = create_miopen_depthwise_convolution_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
}

// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
// Resolves this op's schema from the dispatcher singleton (findSchemaOrThrow
// errors if the schema was never registered) and returns a handle typed to the
// op's compile-time C++ signature.
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn_out::schema> create_miopen_rnn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(miopen_rnn_out::name, miopen_rnn_out::overload_name)
      .typed<miopen_rnn_out::schema>();
}

// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_miopen_rnn_out_typed_handle();
    return dispatch_handle.call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_miopen_rnn_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
}

// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<miopen_rnn_backward_out::schema> create_miopen_rnn_backward_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(miopen_rnn_backward_out::name, miopen_rnn_backward_out::overload_name).typed<miopen_rnn_backward_out::schema>();
}

// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
void miopen_rnn_backward_out::call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    // Dispatch through the cached typed operator handle; op returns void.
    static auto dispatch_handle = create_miopen_rnn_backward_out_typed_handle();
    dispatch_handle.call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
void miopen_rnn_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
    // Redispatch under the caller-provided key set; op returns void.
    static auto dispatch_handle = create_miopen_rnn_backward_out_typed_handle();
    dispatch_handle.redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
}

// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sparse_matmul_out::schema> create__sparse_sparse_matmul_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_sparse_sparse_matmul_out::name, _sparse_sparse_matmul_out::overload_name).typed<_sparse_sparse_matmul_out::schema>();
}

// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sparse_matmul_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create__sparse_sparse_matmul_out_typed_handle();
    return dispatch_handle.call(self, other, out);
}

// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sparse_matmul_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create__sparse_sparse_matmul_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<mul_Scalar_out::schema> create_mul_Scalar_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(mul_Scalar_out::name, mul_Scalar_out::overload_name).typed<mul_Scalar_out::schema>();
}

// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_mul_Scalar_out_typed_handle();
    return dispatch_handle.call(self, other, out);
}

// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mul_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_mul_Scalar_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_functional::schema> create__native_batch_norm_legit_functional_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_native_batch_norm_legit_functional::name, _native_batch_norm_legit_functional::overload_name).typed<_native_batch_norm_legit_functional::schema>();
}

// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create__native_batch_norm_legit_functional_typed_handle();
    return dispatch_handle.call(input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create__native_batch_norm_legit_functional_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
}

// aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_native_batch_norm_legit_no_training_out::schema> create__native_batch_norm_legit_no_training_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_native_batch_norm_legit_no_training_out::name, _native_batch_norm_legit_no_training_out::overload_name).typed<_native_batch_norm_legit_no_training_out::schema>();
}

// aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create__native_batch_norm_legit_no_training_out_typed_handle();
    return dispatch_handle.call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2);
}

// aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create__native_batch_norm_legit_no_training_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2);
}

// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_stats_out::schema> create_batch_norm_stats_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_stats_out::name, batch_norm_stats_out::overload_name).typed<batch_norm_stats_out::schema>();
}

// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out::call(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_batch_norm_stats_out_typed_handle();
    return dispatch_handle.call(input, eps, out0, out1);
}

// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_batch_norm_stats_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, eps, out0, out1);
}

// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats_out::schema> create_batch_norm_gather_stats_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_gather_stats_out::name, batch_norm_gather_stats_out::overload_name).typed<batch_norm_gather_stats_out::schema>();
}

// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_batch_norm_gather_stats_out_typed_handle();
    return dispatch_handle.call(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}

// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_batch_norm_gather_stats_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
}

// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_gather_stats_with_counts_out::schema> create_batch_norm_gather_stats_with_counts_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_gather_stats_with_counts_out::name, batch_norm_gather_stats_with_counts_out::overload_name).typed<batch_norm_gather_stats_with_counts_out::schema>();
}

// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out::call(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_batch_norm_gather_stats_with_counts_out_typed_handle();
    return dispatch_handle.call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}

// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_batch_norm_gather_stats_with_counts_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
}

// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<native_batch_norm_backward_out::schema> create_native_batch_norm_backward_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(native_batch_norm_backward_out::name, native_batch_norm_backward_out::overload_name).typed<native_batch_norm_backward_out::schema>();
}

// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out::call(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_native_batch_norm_backward_out_typed_handle();
    return dispatch_handle.call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}

// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_native_batch_norm_backward_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
}

// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_reduce_out::schema> create_batch_norm_backward_reduce_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_backward_reduce_out::name, batch_norm_backward_reduce_out::overload_name).typed<batch_norm_backward_reduce_out::schema>();
}

// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_batch_norm_backward_reduce_out_typed_handle();
    return dispatch_handle.call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}

// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_batch_norm_backward_reduce_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
}

// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_backward_elemt_out::schema> create_batch_norm_backward_elemt_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_backward_elemt_out::name, batch_norm_backward_elemt_out::overload_name).typed<batch_norm_backward_elemt_out::schema>();
}

// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & batch_norm_backward_elemt_out::call(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_batch_norm_backward_elemt_out_typed_handle();
    return dispatch_handle.call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count, out);
}

// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & batch_norm_backward_elemt_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_batch_norm_backward_elemt_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count, out);
}

// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<batch_norm_update_stats_out::schema> create_batch_norm_update_stats_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(batch_norm_update_stats_out::name, batch_norm_update_stats_out::overload_name).typed<batch_norm_update_stats_out::schema>();
}

// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_batch_norm_update_stats_out_typed_handle();
    return dispatch_handle.call(input, running_mean, running_var, momentum, out0, out1);
}

// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_batch_norm_update_stats_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, running_mean, running_var, momentum, out0, out1);
}

// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_nnpack_spatial_convolution_out::schema> create__nnpack_spatial_convolution_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nnpack_spatial_convolution_out::name, _nnpack_spatial_convolution_out::overload_name).typed<_nnpack_spatial_convolution_out::schema>();
}

// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nnpack_spatial_convolution_out::call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create__nnpack_spatial_convolution_out_typed_handle();
    return dispatch_handle.call(input, weight, bias, padding, stride, out);
}

// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nnpack_spatial_convolution_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create__nnpack_spatial_convolution_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, input, weight, bias, padding, stride, out);
}

// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ones_names_out::schema> create_ones_names_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ones_names_out::name, ones_names_out::overload_name).typed<ones_names_out::schema>();
}

// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_names_out::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_ones_names_out_typed_handle();
    return dispatch_handle.call(size, names, out);
}

// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_ones_names_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, size, names, out);
}

// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ones_like_out::schema> create_ones_like_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(ones_like_out::name, ones_like_out::overload_name).typed<ones_like_out::schema>();
}

// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Dispatch through the cached typed operator handle (resolved once, lazily).
    static auto dispatch_handle = create_ones_like_out_typed_handle();
    return dispatch_handle.call(self, memory_format, out);
}

// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ones_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Redispatch under the caller-provided key set via the cached handle.
    static auto dispatch_handle = create_ones_like_out_typed_handle();
    return dispatch_handle.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_euclidean_dist_out::schema> create__euclidean_dist_out_typed_handle() {
  // Look the operator up by (name, overload) and bind it to a schema-typed handle.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_euclidean_dist_out::name, _euclidean_dist_out::overload_name).typed<_euclidean_dist_out::schema>();
}

// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _euclidean_dist_out::call(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
    
    static auto op = create__euclidean_dist_out_typed_handle();
    return op.call(x1, x2, out);
}

// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _euclidean_dist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
    
    static auto op = create__euclidean_dist_out_typed_handle();
    return op.redispatch(dispatchKeySet, x1, x2, out);
}

// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_forward_out::schema> create__cdist_forward_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cdist_forward_out::name, _cdist_forward_out::overload_name)
      .typed<_cdist_forward_out::schema>();
}

// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_forward_out::call(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create__cdist_forward_out_typed_handle();
    return typed_op.call(x1, x2, p, compute_mode, out);
}

// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_forward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create__cdist_forward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, x1, x2, p, compute_mode, out);
}

// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_cdist_backward_out::schema> create__cdist_backward_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_cdist_backward_out::name, _cdist_backward_out::overload_name)
      .typed<_cdist_backward_out::schema>();
}

// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_backward_out::call(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create__cdist_backward_out_typed_handle();
    return typed_op.call(grad, x1, x2, p, cdist, out);
}

// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cdist_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create__cdist_backward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad, x1, x2, p, cdist, out);
}

// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_forward_out::schema> create__pdist_forward_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_pdist_forward_out::name, _pdist_forward_out::overload_name)
      .typed<_pdist_forward_out::schema>();
}

// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_forward_out::call(const at::Tensor & self, double p, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create__pdist_forward_out_typed_handle();
    return typed_op.call(self, p, out);
}

// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_forward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create__pdist_forward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, p, out);
}

// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_pdist_backward_out::schema> create__pdist_backward_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_pdist_backward_out::name, _pdist_backward_out::overload_name)
      .typed<_pdist_backward_out::schema>();
}

// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_backward_out::call(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create__pdist_backward_out_typed_handle();
    return typed_op.call(grad, self, p, pdist, out);
}

// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pdist_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create__pdist_backward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad, self, p, pdist, out);
}

// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pixel_shuffle_out::schema> create_pixel_shuffle_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(pixel_shuffle_out::name, pixel_shuffle_out::overload_name)
      .typed<pixel_shuffle_out::schema>();
}

// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_shuffle_out::call(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_pixel_shuffle_out_typed_handle();
    return typed_op.call(self, upscale_factor, out);
}

// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_shuffle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_pixel_shuffle_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, upscale_factor, out);
}

// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<pixel_unshuffle_out::schema> create_pixel_unshuffle_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(pixel_unshuffle_out::name, pixel_unshuffle_out::overload_name)
      .typed<pixel_unshuffle_out::schema>();
}

// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_unshuffle_out::call(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_pixel_unshuffle_out_typed_handle();
    return typed_op.call(self, downscale_factor, out);
}

// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & pixel_unshuffle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_pixel_unshuffle_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, downscale_factor, out);
}

// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<channel_shuffle_out::schema> create_channel_shuffle_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(channel_shuffle_out::name, channel_shuffle_out::overload_name)
      .typed<channel_shuffle_out::schema>();
}

// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & channel_shuffle_out::call(const at::Tensor & self, c10::SymInt groups, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_channel_shuffle_out_typed_handle();
    return typed_op.call(self, groups, out);
}

// aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & channel_shuffle_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_channel_shuffle_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, groups, out);
}

// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_pin_memory_out::schema> create__pin_memory_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_pin_memory_out::name, _pin_memory_out::overload_name)
      .typed<_pin_memory_out::schema>();
}

// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pin_memory_out::call(const at::Tensor & self, ::std::optional<at::Device> device, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create__pin_memory_out_typed_handle();
    return typed_op.call(self, device, out);
}

// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _pin_memory_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Device> device, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create__pin_memory_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, device, out);
}

// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<scalar_tensor_out::schema> create_scalar_tensor_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(scalar_tensor_out::name, scalar_tensor_out::overload_name)
      .typed<scalar_tensor_out::schema>();
}

// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scalar_tensor_out::call(const at::Scalar & s, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_scalar_tensor_out_typed_handle();
    return typed_op.call(s, out);
}

// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & scalar_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_scalar_tensor_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, s, out);
}

// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rand_names_out::schema> create_rand_names_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(rand_names_out::name, rand_names_out::overload_name)
      .typed<rand_names_out::schema>();
}

// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_names_out::call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_rand_names_out_typed_handle();
    return typed_op.call(size, names, out);
}

// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_rand_names_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, size, names, out);
}

// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rand_generator_with_names_out::schema> create_rand_generator_with_names_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(rand_generator_with_names_out::name, rand_generator_with_names_out::overload_name)
      .typed<rand_generator_with_names_out::schema>();
}

// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_generator_with_names_out::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_rand_generator_with_names_out_typed_handle();
    return typed_op.call(size, generator, names, out);
}

// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_generator_with_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_rand_generator_with_names_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, size, generator, names, out);
}

// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<rand_like_out::schema> create_rand_like_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(rand_like_out::name, rand_like_out::overload_name)
      .typed<rand_like_out::schema>();
}

// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_rand_like_out_typed_handle();
    return typed_op.call(self, memory_format, out);
}

// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rand_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_rand_like_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_like_out::schema> create_randint_like_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(randint_like_out::name, randint_like_out::overload_name)
      .typed<randint_like_out::schema>();
}

// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_out::call(const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_randint_like_out_typed_handle();
    return typed_op.call(self, high, memory_format, out);
}

// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_randint_like_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, high, memory_format, out);
}

// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randint_like_low_dtype_out::schema> create_randint_like_low_dtype_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(randint_like_low_dtype_out::name, randint_like_low_dtype_out::overload_name)
      .typed<randint_like_low_dtype_out::schema>();
}

// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_low_dtype_out::call(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_randint_like_low_dtype_out_typed_handle();
    return typed_op.call(self, low, high, memory_format, out);
}

// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randint_like_low_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_randint_like_low_dtype_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, low, high, memory_format, out);
}

// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randn_names_out::schema> create_randn_names_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(randn_names_out::name, randn_names_out::overload_name)
      .typed<randn_names_out::schema>();
}

// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_names_out::call(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_randn_names_out_typed_handle();
    return typed_op.call(size, names, out);
}

// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_randn_names_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, size, names, out);
}

// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randn_generator_with_names_out::schema> create_randn_generator_with_names_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(randn_generator_with_names_out::name, randn_generator_with_names_out::overload_name)
      .typed<randn_generator_with_names_out::schema>();
}

// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_generator_with_names_out::call(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_randn_generator_with_names_out_typed_handle();
    return typed_op.call(size, generator, names, out);
}

// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_generator_with_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_randn_generator_with_names_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, size, generator, names, out);
}

// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<randn_like_out::schema> create_randn_like_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(randn_like_out::name, randn_like_out::overload_name)
      .typed<randn_like_out::schema>();
}

// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_randn_like_out_typed_handle();
    return typed_op.call(self, memory_format, out);
}

// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & randn_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_randn_like_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<repeat_out::schema> create_repeat_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(repeat_out::name, repeat_out::overload_name)
      .typed<repeat_out::schema>();
}

// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_out::call(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_repeat_out_typed_handle();
    return typed_op.call(self, repeats, out);
}

// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_repeat_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, repeats, out);
}

// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<repeat_interleave_Tensor_out::schema> create_repeat_interleave_Tensor_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(repeat_interleave_Tensor_out::name, repeat_interleave_Tensor_out::overload_name)
      .typed<repeat_interleave_Tensor_out::schema>();
}

// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_interleave_Tensor_out::call(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_repeat_interleave_Tensor_out_typed_handle();
    return typed_op.call(repeats, output_size, out);
}

// aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & repeat_interleave_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_repeat_interleave_Tensor_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, repeats, output_size, out);
}

// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_reshape_out::schema> create__mkldnn_reshape_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_mkldnn_reshape_out::name, _mkldnn_reshape_out::overload_name)
      .typed<_mkldnn_reshape_out::schema>();
}

// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mkldnn_reshape_out::call(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create__mkldnn_reshape_out_typed_handle();
    return typed_op.call(self, shape, out);
}

// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _mkldnn_reshape_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create__mkldnn_reshape_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, shape, out);
}

// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<relu_out::schema> create_relu_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(relu_out::name, relu_out::overload_name)
      .typed<relu_out::schema>();
}

// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & relu_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_relu_out_typed_handle();
    return typed_op.call(self, out);
}

// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & relu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_relu_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<select_backward_out::schema> create_select_backward_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(select_backward_out::name, select_backward_out::overload_name)
      .typed<select_backward_out::schema>();
}

// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_backward_out::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_select_backward_out_typed_handle();
    return typed_op.call(grad_output, input_sizes, dim, index, out);
}

// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_select_backward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, index, out);
}

// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<celu_out::schema> create_celu_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(celu_out::name, celu_out::overload_name)
      .typed<celu_out::schema>();
}

// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & celu_out::call(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_celu_out_typed_handle();
    return typed_op.call(self, alpha, out);
}

// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & celu_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_celu_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, alpha, out);
}

// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slice_backward_out::schema> create_slice_backward_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(slice_backward_out::name, slice_backward_out::overload_name)
      .typed<slice_backward_out::schema>();
}

// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_backward_out::call(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_slice_backward_out_typed_handle();
    return typed_op.call(grad_output, input_sizes, dim, start, end, step, out);
}

// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_slice_backward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step, out);
}

// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slice_scatter_out::schema> create_slice_scatter_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(slice_scatter_out::name, slice_scatter_out::overload_name)
      .typed<slice_scatter_out::schema>();
}

// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_scatter_out::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_slice_scatter_out_typed_handle();
    return typed_op.call(self, src, dim, start, end, step, out);
}

// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_slice_scatter_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, src, dim, start, end, step, out);
}

// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<select_scatter_out::schema> create_select_scatter_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(select_scatter_out::name, select_scatter_out::overload_name)
      .typed<select_scatter_out::schema>();
}

// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_scatter_out::call(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_select_scatter_out_typed_handle();
    return typed_op.call(self, src, dim, index, out);
}

// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_select_scatter_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, src, dim, index, out);
}

// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_scatter_out::schema> create_diagonal_scatter_out_typed_handle() {
  // Resolve the registered schema by (name, overload name) and bind its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(diagonal_scatter_out::name, diagonal_scatter_out::overload_name)
      .typed<diagonal_scatter_out::schema>();
}

// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_scatter_out::call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    // Handle is resolved once and cached for every subsequent invocation.
    static const auto typed_op = create_diagonal_scatter_out_typed_handle();
    return typed_op.call(self, src, offset, dim1, dim2, out);
}

// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
    // Re-enter dispatch with the caller-supplied dispatch key set.
    static const auto typed_op = create_diagonal_scatter_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, src, offset, dim1, dim2, out);
}

// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_scatter_out::schema> create_as_strided_scatter_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_scatter_out::name, as_strided_scatter_out::overload_name)
      .typed<as_strided_scatter_out::schema>();
}

// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & as_strided_scatter_out::call(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
    
    static auto op = create_as_strided_scatter_out_typed_handle();
    return op.call(self, src, size, stride, storage_offset, out);
}

// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & as_strided_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
    
    static auto op = create_as_strided_scatter_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, size, stride, storage_offset, out);
}

// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_Tensor_out::schema> create_unsafe_split_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_Tensor_out::name, unsafe_split_Tensor_out::overload_name)
      .typed<unsafe_split_Tensor_out::schema>();
}

// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher. Returns void: results go into `out`.
void unsafe_split_Tensor_out::call(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
    
    static auto op = create_unsafe_split_Tensor_out_typed_handle();
    return op.call(self, split_size, dim, out);
}

// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
void unsafe_split_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
    
    static auto op = create_unsafe_split_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_size, dim, out);
}

// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unsafe_split_with_sizes_out::schema> create_unsafe_split_with_sizes_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsafe_split_with_sizes_out::name, unsafe_split_with_sizes_out::overload_name)
      .typed<unsafe_split_with_sizes_out::schema>();
}

// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher. Returns void: results go into `out`.
void unsafe_split_with_sizes_out::call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    
    static auto op = create_unsafe_split_with_sizes_out_typed_handle();
    return op.call(self, split_sizes, dim, out);
}

// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
void unsafe_split_with_sizes_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
    
    static auto op = create_unsafe_split_with_sizes_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, split_sizes, dim, out);
}

// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<sum_out::schema> create_sum_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sum_out::name, sum_out::overload_name)
      .typed<sum_out::schema>();
}

// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & sum_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_sum_out_typed_handle();
    return op.call(self, dtype, out);
}

// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & sum_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_sum_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, out);
}

// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<std_mean_correction_out::schema> create_std_mean_correction_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(std_mean_correction_out::name, std_mean_correction_out::overload_name)
      .typed<std_mean_correction_out::schema>();
}

// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &> std_mean_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_std_mean_correction_out_typed_handle();
    return op.call(self, dim, correction, keepdim, out0, out1);
}

// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> std_mean_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create_std_mean_correction_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1);
}

// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<prod_out::schema> create_prod_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(prod_out::name, prod_out::overload_name)
      .typed<prod_out::schema>();
}

// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & prod_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_out_typed_handle();
    return op.call(self, dtype, out);
}

// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & prod_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    
    static auto op = create_prod_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, out);
}

// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_mkldnn_transpose_out::schema> create__mkldnn_transpose_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_mkldnn_transpose_out::name, _mkldnn_transpose_out::overload_name)
      .typed<_mkldnn_transpose_out::schema>();
}

// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _mkldnn_transpose_out::call(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
    
    static auto op = create__mkldnn_transpose_out_typed_handle();
    return op.call(self, dim0, dim1, out);
}

// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _mkldnn_transpose_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
    
    static auto op = create__mkldnn_transpose_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim0, dim1, out);
}

// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<flip_out::schema> create_flip_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(flip_out::name, flip_out::overload_name)
      .typed<flip_out::schema>();
}

// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & flip_out::call(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
    
    static auto op = create_flip_out_typed_handle();
    return op.call(self, dims, out);
}

// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & flip_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
    
    static auto op = create_flip_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dims, out);
}

// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<roll_out::schema> create_roll_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(roll_out::name, roll_out::overload_name)
      .typed<roll_out::schema>();
}

// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & roll_out::call(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
    
    static auto op = create_roll_out_typed_handle();
    return op.call(self, shifts, dims, out);
}

// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & roll_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
    
    static auto op = create_roll_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, shifts, dims, out);
}

// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<rot90_out::schema> create_rot90_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rot90_out::name, rot90_out::overload_name)
      .typed<rot90_out::schema>();
}

// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & rot90_out::call(const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
    
    static auto op = create_rot90_out_typed_handle();
    return op.call(self, k, dims, out);
}

// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & rot90_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
    
    static auto op = create_rot90_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, k, dims, out);
}

// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_transform_bias_rescale_qkv_out::schema> create__transform_bias_rescale_qkv_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_transform_bias_rescale_qkv_out::name, _transform_bias_rescale_qkv_out::overload_name)
      .typed<_transform_bias_rescale_qkv_out::schema>();
}

// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out::call(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create__transform_bias_rescale_qkv_out_typed_handle();
    return op.call(qkv, qkv_bias, num_heads, out0, out1, out2);
}

// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create__transform_bias_rescale_qkv_out_typed_handle();
    return op.redispatch(dispatchKeySet, qkv, qkv_bias, num_heads, out0, out1, out2);
}

// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_mask_out::schema> create__nested_tensor_from_mask_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_from_mask_out::name, _nested_tensor_from_mask_out::overload_name)
      .typed<_nested_tensor_from_mask_out::schema>();
}

// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_tensor_from_mask_out::call(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
    
    static auto op = create__nested_tensor_from_mask_out_typed_handle();
    return op.call(t, mask, mask_check, out);
}

// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_tensor_from_mask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
    
    static auto op = create__nested_tensor_from_mask_out_typed_handle();
    return op.redispatch(dispatchKeySet, t, mask, mask_check, out);
}

// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_out::schema> create__nested_from_padded_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_from_padded_out::name, _nested_from_padded_out::overload_name)
      .typed<_nested_from_padded_out::schema>();
}

// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_from_padded_out::call(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
    
    static auto op = create__nested_from_padded_out_typed_handle();
    return op.call(padded, cpu_nested_shape_example, fuse_transform_0213, out);
}

// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_from_padded_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
    
    static auto op = create__nested_from_padded_out_typed_handle();
    return op.redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213, out);
}

// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_size_out::schema> create__nested_tensor_size_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_size_out::name, _nested_tensor_size_out::overload_name)
      .typed<_nested_tensor_size_out::schema>();
}

// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_tensor_size_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_size_out_typed_handle();
    return op.call(self, out);
}

// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_tensor_size_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_size_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_strides_out::schema> create__nested_tensor_strides_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_strides_out::name, _nested_tensor_strides_out::overload_name)
      .typed<_nested_tensor_strides_out::schema>();
}

// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_tensor_strides_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_strides_out_typed_handle();
    return op.call(self, out);
}

// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_tensor_strides_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_strides_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_storage_offsets_out::schema> create__nested_tensor_storage_offsets_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_tensor_storage_offsets_out::name, _nested_tensor_storage_offsets_out::overload_name)
      .typed<_nested_tensor_storage_offsets_out::schema>();
}

// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_tensor_storage_offsets_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_storage_offsets_out_typed_handle();
    return op.call(self, out);
}

// aten::_nested_tensor_storage_offsets.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_tensor_storage_offsets_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_tensor_storage_offsets_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_from_padded_and_nested_example_out::schema> create__nested_from_padded_and_nested_example_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_from_padded_and_nested_example_out::name, _nested_from_padded_and_nested_example_out::overload_name)
      .typed<_nested_from_padded_and_nested_example_out::schema>();
}

// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_from_padded_and_nested_example_out::call(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
    
    static auto op = create__nested_from_padded_and_nested_example_out_typed_handle();
    return op.call(padded, nt_example, out);
}

// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_from_padded_and_nested_example_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
    
    static auto op = create__nested_from_padded_and_nested_example_out_typed_handle();
    return op.redispatch(dispatchKeySet, padded, nt_example, out);
}

// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_buffer_copy_out::schema> create__nested_view_from_buffer_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_view_from_buffer_copy_out::name, _nested_view_from_buffer_copy_out::overload_name)
      .typed<_nested_view_from_buffer_copy_out::schema>();
}

// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_view_from_buffer_copy_out::call(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out) {
    
    static auto op = create__nested_view_from_buffer_copy_out_typed_handle();
    return op.call(self, nested_size, nested_strides, offsets, out);
}

// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_view_from_buffer_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out) {
    
    static auto op = create__nested_view_from_buffer_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets, out);
}

// aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_view_from_jagged_copy_out::schema> create__nested_view_from_jagged_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_view_from_jagged_copy_out::name, _nested_view_from_jagged_copy_out::overload_name)
      .typed<_nested_view_from_jagged_copy_out::schema>();
}

// aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_view_from_jagged_copy_out::call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen, at::Tensor & out) {
    
    static auto op = create__nested_view_from_jagged_copy_out_typed_handle();
    return op.call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen, out);
}

// aten::_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_view_from_jagged_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen, at::Tensor & out) {
    
    static auto op = create__nested_view_from_jagged_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen, out);
}

// aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_get_values_copy_out::schema> create__nested_get_values_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_nested_get_values_copy_out::name, _nested_get_values_copy_out::overload_name)
      .typed<_nested_get_values_copy_out::schema>();
}

// aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _nested_get_values_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_get_values_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::_nested_get_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _nested_get_values_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__nested_get_values_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_trilinear_out::schema> create__trilinear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_trilinear_out::name, _trilinear_out::overload_name)
      .typed<_trilinear_out::schema>();
}

// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
at::Tensor & _trilinear_out::call(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {
    
    static auto op = create__trilinear_out_typed_handle();
    return op.call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}

// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
at::Tensor & _trilinear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {
    
    static auto op = create__trilinear_out_typed_handle();
    return op.redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
}

// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_unique_out::schema> create__unique_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_unique_out::name, _unique_out::overload_name)
      .typed<_unique_out::schema>();
}

// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &> _unique_out::call(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__unique_out_typed_handle();
    return op.call(self, sorted, return_inverse, out0, out1);
}

// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &> _unique_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
    
    static auto op = create__unique_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, sorted, return_inverse, out0, out1);
}

// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim_out::schema> create_unique_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_dim_out::name, unique_dim_out::overload_name)
      .typed<unique_dim_out::schema>();
}

// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out::call(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_unique_dim_out_typed_handle();
    return op.call(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}

// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_unique_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
}

// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Resolves this operator's dispatcher entry and narrows it to the compile-time
// schema type; findSchemaOrThrow throws if the op is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<unique_consecutive_out::schema> create_unique_consecutive_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unique_consecutive_out::name, unique_consecutive_out::overload_name)
      .typed<unique_consecutive_out::schema>();
}

// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Caches the typed handle in a thread-safe function-local static and forwards
// the arguments through the dispatcher.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out::call(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_unique_consecutive_out_typed_handle();
    return op.call(self, return_inverse, return_counts, dim, out0, out1, out2);
}

// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Like call(), but re-enters dispatch with the caller-supplied DispatchKeySet.
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    
    static auto op = create_unique_consecutive_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, return_inverse, return_counts, dim, out0, out1, out2);
}

// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<unique_dim_consecutive_out::schema> create_unique_dim_consecutive_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(unique_dim_consecutive_out::name, unique_dim_consecutive_out::overload_name);
  return handle.typed<unique_dim_consecutive_out::schema>();
}

// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out::call(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_unique_dim_consecutive_out_typed_handle();
    return typed_op.call(self, dim, return_inverse, return_counts, out0, out1, out2);
}

// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_unique_dim_consecutive_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, dim, return_inverse, return_counts, out0, out1, out2);
}

// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_unique2_out::schema> create__unique2_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_unique2_out::name, _unique2_out::overload_name);
  return handle.typed<_unique2_out::schema>();
}

// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out::call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__unique2_out_typed_handle();
    return typed_op.call(self, sorted, return_inverse, return_counts, out0, out1, out2);
}

// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__unique2_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts, out0, out1, out2);
}

// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_unsafe_view_out::schema> create__unsafe_view_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_unsafe_view_out::name, _unsafe_view_out::overload_name);
  return handle.typed<_unsafe_view_out::schema>();
}

// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _unsafe_view_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__unsafe_view_out_typed_handle();
    return typed_op.call(self, size, out);
}

// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _unsafe_view_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__unsafe_view_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, size, out);
}

// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<var_mean_correction_out::schema> create_var_mean_correction_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(var_mean_correction_out::name, var_mean_correction_out::overload_name);
  return handle.typed<var_mean_correction_out::schema>();
}

// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> var_mean_correction_out::call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_var_mean_correction_out_typed_handle();
    return typed_op.call(self, dim, correction, keepdim, out0, out1);
}

// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> var_mean_correction_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_var_mean_correction_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1);
}

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface_out::schema> create__weight_norm_interface_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_weight_norm_interface_out::name, _weight_norm_interface_out::overload_name);
  return handle.typed<_weight_norm_interface_out::schema>();
}

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out::call(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__weight_norm_interface_out_typed_handle();
    return typed_op.call(v, g, dim, out0, out1);
}

// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__weight_norm_interface_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, v, g, dim, out0, out1);
}

// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_weight_norm_interface_backward_out::schema> create__weight_norm_interface_backward_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_weight_norm_interface_backward_out::name, _weight_norm_interface_backward_out::overload_name);
  return handle.typed<_weight_norm_interface_backward_out::schema>();
}

// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out::call(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__weight_norm_interface_backward_out_typed_handle();
    return typed_op.call(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}

// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__weight_norm_interface_backward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
}

// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<zeros_names_out::schema> create_zeros_names_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(zeros_names_out::name, zeros_names_out::overload_name);
  return handle.typed<zeros_names_out::schema>();
}

// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_names_out::call(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_zeros_names_out_typed_handle();
    return typed_op.call(size, names, out);
}

// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_names_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_zeros_names_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, size, names, out);
}

// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_efficientzerotensor_out::schema> create__efficientzerotensor_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_efficientzerotensor_out::name, _efficientzerotensor_out::overload_name);
  return handle.typed<_efficientzerotensor_out::schema>();
}

// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _efficientzerotensor_out::call(c10::SymIntArrayRef size, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__efficientzerotensor_out_typed_handle();
    return typed_op.call(size, out);
}

// aten::_efficientzerotensor.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _efficientzerotensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__efficientzerotensor_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, size, out);
}

// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<zeros_like_out::schema> create_zeros_like_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(zeros_like_out::name, zeros_like_out::overload_name);
  return handle.typed<zeros_like_out::schema>();
}

// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_like_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_zeros_like_out_typed_handle();
    return typed_op.call(self, memory_format, out);
}

// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & zeros_like_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_zeros_like_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma_grad_out::schema> create__standard_gamma_grad_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_standard_gamma_grad_out::name, _standard_gamma_grad_out::overload_name);
  return handle.typed<_standard_gamma_grad_out::schema>();
}

// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_grad_out::call(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__standard_gamma_grad_out_typed_handle();
    return typed_op.call(self, output, out);
}

// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_grad_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__standard_gamma_grad_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, output, out);
}

// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_standard_gamma_out::schema> create__standard_gamma_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_standard_gamma_out::name, _standard_gamma_out::overload_name);
  return handle.typed<_standard_gamma_out::schema>();
}

// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_out::call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__standard_gamma_out_typed_handle();
    return typed_op.call(self, generator, out);
}

// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _standard_gamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__standard_gamma_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, generator, out);
}

// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_dirichlet_grad_out::schema> create__dirichlet_grad_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_dirichlet_grad_out::name, _dirichlet_grad_out::overload_name);
  return handle.typed<_dirichlet_grad_out::schema>();
}

// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _dirichlet_grad_out::call(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__dirichlet_grad_out_typed_handle();
    return typed_op.call(x, alpha, total, out);
}

// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _dirichlet_grad_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__dirichlet_grad_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, x, alpha, total, out);
}

// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sample_dirichlet_out::schema> create__sample_dirichlet_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_sample_dirichlet_out::name, _sample_dirichlet_out::overload_name);
  return handle.typed<_sample_dirichlet_out::schema>();
}

// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sample_dirichlet_out::call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__sample_dirichlet_out_typed_handle();
    return typed_op.call(self, generator, out);
}

// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sample_dirichlet_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__sample_dirichlet_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, generator, out);
}

// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<poisson_out::schema> create_poisson_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(poisson_out::name, poisson_out::overload_name);
  return handle.typed<poisson_out::schema>();
}

// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & poisson_out::call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_poisson_out_typed_handle();
    return typed_op.call(self, generator, out);
}

// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & poisson_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_poisson_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, generator, out);
}

// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<binomial_out::schema> create_binomial_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(binomial_out::name, binomial_out::overload_name);
  return handle.typed<binomial_out::schema>();
}

// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binomial_out::call(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_binomial_out_typed_handle();
    return typed_op.call(count, prob, generator, out);
}

// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & binomial_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_binomial_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, count, prob, generator, out);
}

// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<native_norm_out::schema> create_native_norm_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(native_norm_out::name, native_norm_out::overload_name);
  return handle.typed<native_norm_out::schema>();
}

// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_out::call(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_native_norm_out_typed_handle();
    return typed_op.call(self, p, out);
}

// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_native_norm_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, p, out);
}

// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<native_norm_ScalarOpt_dim_dtype_out::schema> create_native_norm_ScalarOpt_dim_dtype_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(native_norm_ScalarOpt_dim_dtype_out::name, native_norm_ScalarOpt_dim_dtype_out::overload_name);
  return handle.typed<native_norm_ScalarOpt_dim_dtype_out::schema>();
}

// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_ScalarOpt_dim_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create_native_norm_ScalarOpt_dim_dtype_out_typed_handle();
    return typed_op.call(self, p, dim, keepdim, dtype, out);
}

// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & native_norm_ScalarOpt_dim_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create_native_norm_ScalarOpt_dim_dtype_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
}

// aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_with_update_functional::schema> create__batch_norm_with_update_functional_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_batch_norm_with_update_functional::name, _batch_norm_with_update_functional::overload_name);
  return handle.typed<_batch_norm_with_update_functional::schema>();
}

// aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_functional::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__batch_norm_with_update_functional_typed_handle();
    return typed_op.call(input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_with_update_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__batch_norm_with_update_functional_typed_handle();
    return typed_op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps);
}

// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
static C10_NOINLINE c10::TypedOperatorHandle<_batch_norm_no_update_out::schema> create__batch_norm_no_update_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_batch_norm_no_update_out::name, _batch_norm_no_update_out::overload_name);
  return handle.typed<_batch_norm_no_update_out::schema>();
}

// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_no_update_out::call(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__batch_norm_no_update_out_typed_handle();
    return typed_op.call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2, out3);
}

// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_no_update_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__batch_norm_no_update_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2, out3);
}

// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_dim_out::schema> create__sparse_sum_dim_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_sparse_sum_dim_out::name, _sparse_sum_dim_out::overload_name);
  return handle.typed<_sparse_sum_dim_out::schema>();
}

// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_dim_out::call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__sparse_sum_dim_out_typed_handle();
    return typed_op.call(self, dim, out);
}

// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__sparse_sum_dim_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_sum_backward_out::schema> create__sparse_sum_backward_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_sparse_sum_backward_out::name, _sparse_sum_backward_out::overload_name);
  return handle.typed<_sparse_sum_backward_out::schema>();
}

// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_backward_out::call(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__sparse_sum_backward_out_typed_handle();
    return typed_op.call(grad, self, dim, out);
}

// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_sum_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__sparse_sum_backward_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, grad, self, dim, out);
}

// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_sum_dim_dtype_out::schema> create__sparse_csr_sum_dim_dtype_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_sparse_csr_sum_dim_dtype_out::name, _sparse_csr_sum_dim_dtype_out::overload_name);
  return handle.typed<_sparse_csr_sum_dim_dtype_out::schema>();
}

// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_sum_dim_dtype_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__sparse_csr_sum_dim_dtype_out_typed_handle();
    return typed_op.call(self, dim, keepdim, dtype, out);
}

// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_sum_dim_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__sparse_csr_sum_dim_dtype_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_csr_prod_dim_dtype_out::schema> create__sparse_csr_prod_dim_dtype_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_sparse_csr_prod_dim_dtype_out::name, _sparse_csr_prod_dim_dtype_out::overload_name);
  return handle.typed<_sparse_csr_prod_dim_dtype_out::schema>();
}

// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_prod_dim_dtype_out::call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__sparse_csr_prod_dim_dtype_out_typed_handle();
    return typed_op.call(self, dim, keepdim, dtype, out);
}

// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_csr_prod_dim_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__sparse_csr_prod_dim_dtype_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
}

// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_out::schema> create__sparse_softmax_out_typed_handle() {
  // Resolve the operator by (name, overload) and bind it to its schema type.
  auto& dispatcher = c10::Dispatcher::singleton();
  auto handle = dispatcher.findSchemaOrThrow(_sparse_softmax_out::name, _sparse_softmax_out::overload_name);
  return handle.typed<_sparse_softmax_out::schema>();
}

// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Typed handle is created lazily on first call and cached thereafter.
    static auto typed_op = create__sparse_softmax_out_typed_handle();
    return typed_op.call(self, dim, half_to_float, out);
}

// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    // Re-enter dispatch with an explicit key set supplied by the caller.
    static auto typed_op = create__sparse_softmax_out_typed_handle();
    return typed_op.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_softmax_backward_data_out::schema> create__sparse_softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_softmax_backward_data_out::name, _sparse_softmax_backward_data_out::overload_name)
      .typed<_sparse_softmax_backward_data_out::schema>();
}

// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _sparse_softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__sparse_softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, self, out);
}

// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__sparse_softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, self, out);
}

// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_out::schema> create__sparse_log_softmax_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_log_softmax_out::name, _sparse_log_softmax_out::overload_name)
      .typed<_sparse_log_softmax_out::schema>();
}

// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _sparse_log_softmax_out::call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    
    static auto op = create__sparse_log_softmax_out_typed_handle();
    return op.call(self, dim, half_to_float, out);
}

// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_log_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
    
    static auto op = create__sparse_log_softmax_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dim, half_to_float, out);
}

// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_log_softmax_backward_data_out::schema> create__sparse_log_softmax_backward_data_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_log_softmax_backward_data_out::name, _sparse_log_softmax_backward_data_out::overload_name)
      .typed<_sparse_log_softmax_backward_data_out::schema>();
}

// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _sparse_log_softmax_backward_data_out::call(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__sparse_log_softmax_backward_data_out_typed_handle();
    return op.call(grad_output, output, dim, self, out);
}

// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_log_softmax_backward_data_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create__sparse_log_softmax_backward_data_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, output, dim, self, out);
}

// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_spdiags_out::schema> create__spdiags_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_spdiags_out::name, _spdiags_out::overload_name)
      .typed<_spdiags_out::schema>();
}

// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _spdiags_out::call(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout, at::Tensor & out) {
    
    static auto op = create__spdiags_out_typed_handle();
    return op.call(diagonals, offsets, shape, layout, out);
}

// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _spdiags_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout, at::Tensor & out) {
    
    static auto op = create__spdiags_out_typed_handle();
    return op.redispatch(dispatchKeySet, diagonals, offsets, shape, layout, out);
}

// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<norm_ScalarOpt_dtype_out::schema> create_norm_ScalarOpt_dtype_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(norm_ScalarOpt_dtype_out::name, norm_ScalarOpt_dtype_out::overload_name)
      .typed<norm_ScalarOpt_dtype_out::schema>();
}

// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & norm_ScalarOpt_dtype_out::call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
    
    static auto op = create_norm_ScalarOpt_dtype_out_typed_handle();
    return op.call(self, p, dtype, out);
}

// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & norm_ScalarOpt_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
    
    static auto op = create_norm_ScalarOpt_dtype_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, dtype, out);
}

// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<norm_Scalar_out::schema> create_norm_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(norm_Scalar_out::name, norm_Scalar_out::overload_name)
      .typed<norm_Scalar_out::schema>();
}

// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & norm_Scalar_out::call(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    
    static auto op = create_norm_Scalar_out_typed_handle();
    return op.call(self, p, out);
}

// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & norm_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
    
    static auto op = create_norm_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, out);
}

// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<clone_out::schema> create_clone_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(clone_out::name, clone_out::overload_name)
      .typed<clone_out::schema>();
}

// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & clone_out::call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    
    static auto op = create_clone_out_typed_handle();
    return op.call(self, memory_format, out);
}

// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & clone_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    
    static auto op = create_clone_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, memory_format, out);
}

// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_out::schema> create_resize_as_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_out::name, resize_as_out::overload_name)
      .typed<resize_as_out::schema>();
}

// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
// Note: `out` is taken/returned as const& here (resize family), per the generated signature.
const at::Tensor & resize_as_out::call(const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    
    static auto op = create_resize_as_out_typed_handle();
    return op.call(self, the_template, memory_format, out);
}

// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
const at::Tensor & resize_as_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
    
    static auto op = create_resize_as_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, memory_format, out);
}

// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<resize_as::schema> create_resize_as_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as::name, resize_as::overload_name)
      .typed<resize_as::schema>();
}

// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor resize_as::call(const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_resize_as_typed_handle();
    return op.call(self, the_template, memory_format);
}

// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor resize_as::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format) {
    
    static auto op = create_resize_as_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, memory_format);
}

// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_sparse_out::schema> create_resize_as_sparse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_sparse_out::name, resize_as_sparse_out::overload_name)
      .typed<resize_as_sparse_out::schema>();
}

// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
const at::Tensor & resize_as_sparse_out::call(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) {
    
    static auto op = create_resize_as_sparse_out_typed_handle();
    return op.call(self, the_template, out);
}

// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
const at::Tensor & resize_as_sparse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) {
    
    static auto op = create_resize_as_sparse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template, out);
}

// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<resize_as_sparse::schema> create_resize_as_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(resize_as_sparse::name, resize_as_sparse::overload_name)
      .typed<resize_as_sparse::schema>();
}

// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor resize_as_sparse::call(const at::Tensor & self, const at::Tensor & the_template) {
    
    static auto op = create_resize_as_sparse_typed_handle();
    return op.call(self, the_template);
}

// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor resize_as_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) {
    
    static auto op = create_resize_as_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, the_template);
}

// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<zero_out::schema> create_zero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zero_out::name, zero_out::overload_name)
      .typed<zero_out::schema>();
}

// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & zero_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_zero_out_typed_handle();
    return op.call(self, out);
}

// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & zero_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_zero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::zero(Tensor self) -> Tensor
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<zero::schema> create_zero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(zero::name, zero::overload_name)
      .typed<zero::schema>();
}

// aten::zero(Tensor self) -> Tensor
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor zero::call(const at::Tensor & self) {
    
    static auto op = create_zero_typed_handle();
    return op.call(self);
}

// aten::zero(Tensor self) -> Tensor
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor zero::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    
    static auto op = create_zero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sub_Scalar_out::schema> create_sub_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sub_Scalar_out::name, sub_Scalar_out::overload_name)
      .typed<sub_Scalar_out::schema>();
}

// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & sub_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_sub_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & sub_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_sub_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rsub_Tensor_out::schema> create_rsub_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsub_Tensor_out::name, rsub_Tensor_out::overload_name)
      .typed<rsub_Tensor_out::schema>();
}

// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & rsub_Tensor_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_rsub_Tensor_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & rsub_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_rsub_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rsub_Scalar_out::schema> create_rsub_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rsub_Scalar_out::name, rsub_Scalar_out::overload_name)
      .typed<rsub_Scalar_out::schema>();
}

// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & rsub_Scalar_out::call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_rsub_Scalar_out_typed_handle();
    return op.call(self, other, alpha, out);
}

// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & rsub_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create_rsub_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_addmm_out::schema> create__sparse_addmm_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_addmm_out::name, _sparse_addmm_out::overload_name)
      .typed<_sparse_addmm_out::schema>();
}

// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _sparse_addmm_out::call(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create__sparse_addmm_out_typed_handle();
    return op.call(self, mat1, mat2, beta, alpha, out);
}

// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_addmm_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    
    static auto op = create__sparse_addmm_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
}

// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_coo_tensor_size_out::schema> create_sparse_coo_tensor_size_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_coo_tensor_size_out::name, sparse_coo_tensor_size_out::overload_name)
      .typed<sparse_coo_tensor_size_out::schema>();
}

// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & sparse_coo_tensor_size_out::call(at::IntArrayRef size, at::Tensor & out) {
    
    static auto op = create_sparse_coo_tensor_size_out_typed_handle();
    return op.call(size, out);
}

// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & sparse_coo_tensor_size_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {
    
    static auto op = create_sparse_coo_tensor_size_out_typed_handle();
    return op.redispatch(dispatchKeySet, size, out);
}

// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims_out::schema> create__sparse_coo_tensor_with_dims_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_with_dims_out::name, _sparse_coo_tensor_with_dims_out::overload_name)
      .typed<_sparse_coo_tensor_with_dims_out::schema>();
}

// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _sparse_coo_tensor_with_dims_out::call(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
    
    static auto op = create__sparse_coo_tensor_with_dims_out_typed_handle();
    return op.call(sparse_dim, dense_dim, size, out);
}

// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_coo_tensor_with_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
    
    static auto op = create__sparse_coo_tensor_with_dims_out_typed_handle();
    return op.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, out);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_coo_tensor_with_dims_and_tensors_out::schema> create__sparse_coo_tensor_with_dims_and_tensors_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_coo_tensor_with_dims_and_tensors_out::name, _sparse_coo_tensor_with_dims_and_tensors_out::overload_name)
      .typed<_sparse_coo_tensor_with_dims_and_tensors_out::schema>();
}

// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
// `size` is SymInt-valued here (symbolic shapes), unlike the int[] overloads above.
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out::call(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
    
    static auto op = create__sparse_coo_tensor_with_dims_and_tensors_out_typed_handle();
    return op.call(sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
}

// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, bool? is_coalesced=None, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
    
    static auto op = create__sparse_coo_tensor_with_dims_and_tensors_out_typed_handle();
    return op.redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, is_coalesced, out);
}

// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_out::schema> create_sparse_resize_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_out::name, sparse_resize_out::overload_name)
      .typed<sparse_resize_out::schema>();
}

// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
const at::Tensor & sparse_resize_out::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
    
    static auto op = create_sparse_resize_out_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim, out);
}

// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
const at::Tensor & sparse_resize_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
    
    static auto op = create_sparse_resize_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out);
}

// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize::schema> create_sparse_resize_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize::name, sparse_resize::overload_name)
      .typed<sparse_resize::schema>();
}

// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor sparse_resize::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor sparse_resize::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_and_clear_out::schema> create_sparse_resize_and_clear_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_and_clear_out::name, sparse_resize_and_clear_out::overload_name)
      .typed<sparse_resize_and_clear_out::schema>();
}

// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
const at::Tensor & sparse_resize_and_clear_out::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
    
    static auto op = create_sparse_resize_and_clear_out_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim, out);
}

// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
const at::Tensor & sparse_resize_and_clear_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
    
    static auto op = create_sparse_resize_and_clear_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out);
}

// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_resize_and_clear::schema> create_sparse_resize_and_clear_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_resize_and_clear::name, sparse_resize_and_clear::overload_name)
      .typed<sparse_resize_and_clear::schema>();
}

// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor sparse_resize_and_clear::call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize_and_clear_typed_handle();
    return op.call(self, size, sparse_dim, dense_dim);
}

// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor sparse_resize_and_clear::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
    
    static auto op = create_sparse_resize_and_clear_typed_handle();
    return op.redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
}

// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<sparse_mask_out::schema> create_sparse_mask_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(sparse_mask_out::name, sparse_mask_out::overload_name)
      .typed<sparse_mask_out::schema>();
}

// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & sparse_mask_out::call(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    
    static auto op = create_sparse_mask_out_typed_handle();
    return op.call(self, mask, out);
}

// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & sparse_mask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
    
    static auto op = create_sparse_mask_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, out);
}

// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_mask_projection_out::schema> create__sparse_mask_projection_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_mask_projection_out::name, _sparse_mask_projection_out::overload_name)
      .typed<_sparse_mask_projection_out::schema>();
}

// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _sparse_mask_projection_out::call(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
    
    static auto op = create__sparse_mask_projection_out_typed_handle();
    return op.call(self, mask, accumulate_matches, out);
}

// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _sparse_mask_projection_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
    
    static auto op = create__sparse_mask_projection_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mask, accumulate_matches, out);
}

// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)
// Lazily resolves the dispatcher handle; findSchemaOrThrow errors if the op is unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_dense_out::schema> create__to_dense_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_dense_out::name, _to_dense_out::overload_name)
      .typed<_to_dense_out::schema>();
}

// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)
// Entry point: dispatches through a handle cached in a thread-safe function-local static.
at::Tensor & _to_dense_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad, at::Tensor & out) {
    
    static auto op = create__to_dense_out_typed_handle();
    return op.call(self, dtype, masked_grad, out);
}

// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, bool? masked_grad=None, *, Tensor(a!) out) -> Tensor(a!)
// Redispatch variant: forwards an explicit DispatchKeySet for re-entrant dispatch.
at::Tensor & _to_dense_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad, at::Tensor & out) {
    
    static auto op = create__to_dense_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, masked_grad, out);
}

// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesce_out::schema> create__coalesce_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_coalesce_out::name, _coalesce_out::overload_name)
      .typed<_coalesce_out::schema>();
}

// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _coalesce_out::call(const at::Tensor & self, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__coalesce_out_typed_handle();
    return op.call(self, out);
}

// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _coalesce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__coalesce_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesced_out::schema> create__coalesced_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_coalesced_out::name, _coalesced_out::overload_name)
      .typed<_coalesced_out::schema>();
}

// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _coalesced_out::call(const at::Tensor & self, bool coalesced, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__coalesced_out_typed_handle();
    return op.call(self, coalesced, out);
}

// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _coalesced_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__coalesced_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, coalesced, out);
}

// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_coalesced::schema> create__coalesced_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_coalesced::name, _coalesced::overload_name)
      .typed<_coalesced::schema>();
}

// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
at::Tensor _coalesced::call(const at::Tensor & self, bool coalesced) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__coalesced_typed_handle();
    return op.call(self, coalesced);
}

// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
at::Tensor _coalesced::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__coalesced_typed_handle();
    return op.redispatch(dispatchKeySet, self, coalesced);
}

// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<copy_sparse_to_sparse_out::schema> create_copy_sparse_to_sparse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_sparse_to_sparse_out::name, copy_sparse_to_sparse_out::overload_name)
      .typed<copy_sparse_to_sparse_out::schema>();
}

// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copy_sparse_to_sparse_out::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_copy_sparse_to_sparse_out_typed_handle();
    return op.call(self, src, non_blocking, out);
}

// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & copy_sparse_to_sparse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_copy_sparse_to_sparse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking, out);
}

// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<copy_sparse_to_sparse::schema> create_copy_sparse_to_sparse_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(copy_sparse_to_sparse::name, copy_sparse_to_sparse::overload_name)
      .typed<copy_sparse_to_sparse::schema>();
}

// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
at::Tensor copy_sparse_to_sparse::call(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_copy_sparse_to_sparse_typed_handle();
    return op.call(self, src, non_blocking);
}

// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
at::Tensor copy_sparse_to_sparse::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_copy_sparse_to_sparse_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking);
}

// aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_sparse_dim_out::schema> create__to_sparse_sparse_dim_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_sparse_dim_out::name, _to_sparse_sparse_dim_out::overload_name)
      .typed<_to_sparse_sparse_dim_out::schema>();
}

// aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_sparse_dim_out::call(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__to_sparse_sparse_dim_out_typed_handle();
    return op.call(self, sparse_dim, out);
}

// aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_sparse_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__to_sparse_sparse_dim_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, sparse_dim, out);
}

// aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_out::schema> create__to_sparse_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_out::name, _to_sparse_out::overload_name)
      .typed<_to_sparse_out::schema>();
}

// aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_out::call(const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__to_sparse_out_typed_handle();
    return op.call(self, layout, blocksize, dense_dim, out);
}

// aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__to_sparse_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, layout, blocksize, dense_dim, out);
}

// aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_csr_out::schema> create__to_sparse_csr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_csr_out::name, _to_sparse_csr_out::overload_name)
      .typed<_to_sparse_csr_out::schema>();
}

// aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_csr_out::call(const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__to_sparse_csr_out_typed_handle();
    return op.call(self, dense_dim, out);
}

// aten::_to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_csr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__to_sparse_csr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim, out);
}

// aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_csc_out::schema> create__to_sparse_csc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_csc_out::name, _to_sparse_csc_out::overload_name)
      .typed<_to_sparse_csc_out::schema>();
}

// aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_csc_out::call(const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__to_sparse_csc_out_typed_handle();
    return op.call(self, dense_dim, out);
}

// aten::_to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_csc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__to_sparse_csc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dense_dim, out);
}

// aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_bsr_out::schema> create__to_sparse_bsr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_bsr_out::name, _to_sparse_bsr_out::overload_name)
      .typed<_to_sparse_bsr_out::schema>();
}

// aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_bsr_out::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__to_sparse_bsr_out_typed_handle();
    return op.call(self, blocksize, dense_dim, out);
}

// aten::_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_bsr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__to_sparse_bsr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim, out);
}

// aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_to_sparse_bsc_out::schema> create__to_sparse_bsc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_to_sparse_bsc_out::name, _to_sparse_bsc_out::overload_name)
      .typed<_to_sparse_bsc_out::schema>();
}

// aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_bsc_out::call(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__to_sparse_bsc_out_typed_handle();
    return op.call(self, blocksize, dense_dim, out);
}

// aten::_to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_sparse_bsc_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__to_sparse_bsc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, blocksize, dense_dim, out);
}

// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<to_mkldnn_out::schema> create_to_mkldnn_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_mkldnn_out::name, to_mkldnn_out::overload_name)
      .typed<to_mkldnn_out::schema>();
}

// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_mkldnn_out::call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_to_mkldnn_out_typed_handle();
    return op.call(self, dtype, out);
}

// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_mkldnn_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_to_mkldnn_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, out);
}

// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv2d_weight_out::schema> create_mkldnn_reorder_conv2d_weight_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv2d_weight_out::name, mkldnn_reorder_conv2d_weight_out::overload_name)
      .typed<mkldnn_reorder_conv2d_weight_out::schema>();
}

// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv2d_weight_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_mkldnn_reorder_conv2d_weight_out_typed_handle();
    return op.call(self, padding, stride, dilation, groups, input_size, out);
}

// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv2d_weight_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_mkldnn_reorder_conv2d_weight_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size, out);
}

// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_reorder_conv3d_weight_out::schema> create_mkldnn_reorder_conv3d_weight_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_reorder_conv3d_weight_out::name, mkldnn_reorder_conv3d_weight_out::overload_name)
      .typed<mkldnn_reorder_conv3d_weight_out::schema>();
}

// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv3d_weight_out::call(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_mkldnn_reorder_conv3d_weight_out_typed_handle();
    return op.call(self, padding, stride, dilation, groups, input_size, out);
}

// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_reorder_conv3d_weight_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_mkldnn_reorder_conv3d_weight_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, padding, stride, dilation, groups, input_size, out);
}

// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_dynamic_out::schema> create_quantize_per_tensor_dynamic_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_dynamic_out::name, quantize_per_tensor_dynamic_out::overload_name)
      .typed<quantize_per_tensor_dynamic_out::schema>();
}

// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_dynamic_out::call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_quantize_per_tensor_dynamic_out_typed_handle();
    return op.call(self, dtype, reduce_range, out);
}

// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_dynamic_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_quantize_per_tensor_dynamic_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, dtype, reduce_range, out);
}

// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_out::schema> create_quantize_per_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_out::name, quantize_per_tensor_out::overload_name)
      .typed<quantize_per_tensor_out::schema>();
}

// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_out::call(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_quantize_per_tensor_out_typed_handle();
    return op.call(self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_quantize_per_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensor_qparams_out::schema> create_quantize_per_tensor_tensor_qparams_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_tensor_qparams_out::name, quantize_per_tensor_tensor_qparams_out::overload_name)
      .typed<quantize_per_tensor_tensor_qparams_out::schema>();
}

// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_tensor_qparams_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_quantize_per_tensor_tensor_qparams_out_typed_handle();
    return op.call(self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_tensor_tensor_qparams_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_quantize_per_tensor_tensor_qparams_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, dtype, out);
}

// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_tensor_tensors_out::schema> create_quantize_per_tensor_tensors_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_tensor_tensors_out::name, quantize_per_tensor_tensors_out::overload_name)
      .typed<quantize_per_tensor_tensors_out::schema>();
}

// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
// Void-returning variant: results are written into the `out` TensorList per the schema above.
void quantize_per_tensor_tensors_out::call(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_quantize_per_tensor_tensors_out_typed_handle();
    return op.call(tensors, scales, zero_points, dtype, out);
}

// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
void quantize_per_tensor_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_quantize_per_tensor_tensors_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, scales, zero_points, dtype, out);
}

// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<quantize_per_channel_out::schema> create_quantize_per_channel_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(quantize_per_channel_out::name, quantize_per_channel_out::overload_name)
      .typed<quantize_per_channel_out::schema>();
}

// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_channel_out::call(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_quantize_per_channel_out_typed_handle();
    return op.call(self, scales, zero_points, axis, dtype, out);
}

// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & quantize_per_channel_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_quantize_per_channel_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype, out);
}

// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_self_out::schema> create_dequantize_self_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_self_out::name, dequantize_self_out::overload_name)
      .typed<dequantize_self_out::schema>();
}

// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dequantize_self_out::call(const at::Tensor & self, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_dequantize_self_out_typed_handle();
    return op.call(self, out);
}

// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dequantize_self_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_dequantize_self_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<dequantize_tensors_out::schema> create_dequantize_tensors_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dequantize_tensors_out::name, dequantize_tensors_out::overload_name)
      .typed<dequantize_tensors_out::schema>();
}

// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
// Void-returning variant: results are written into the `out` TensorList per the schema above.
void dequantize_tensors_out::call(at::TensorList tensors, at::TensorList out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_dequantize_tensors_out_typed_handle();
    return op.call(tensors, out);
}

// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
void dequantize_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_dequantize_tensors_out_typed_handle();
    return op.redispatch(dispatchKeySet, tensors, out);
}

// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_scales_out::schema> create_q_per_channel_scales_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_scales_out::name, q_per_channel_scales_out::overload_name)
      .typed<q_per_channel_scales_out::schema>();
}

// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & q_per_channel_scales_out::call(const at::Tensor & self, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_q_per_channel_scales_out_typed_handle();
    return op.call(self, out);
}

// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & q_per_channel_scales_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_q_per_channel_scales_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<q_per_channel_zero_points_out::schema> create_q_per_channel_zero_points_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(q_per_channel_zero_points_out::name, q_per_channel_zero_points_out::overload_name)
      .typed<q_per_channel_zero_points_out::schema>();
}

// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & q_per_channel_zero_points_out::call(const at::Tensor & self, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_q_per_channel_zero_points_out_typed_handle();
    return op.call(self, out);
}

// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & q_per_channel_zero_points_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_q_per_channel_zero_points_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<int_repr_out::schema> create_int_repr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(int_repr_out::name, int_repr_out::overload_name)
      .typed<int_repr_out::schema>();
}

// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & int_repr_out::call(const at::Tensor & self, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create_int_repr_out_typed_handle();
    return op.call(self, out);
}

// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & int_repr_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create_int_repr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_tensor_quantized_tensor_out::schema> create__make_per_tensor_quantized_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_tensor_quantized_tensor_out::name, _make_per_tensor_quantized_tensor_out::overload_name)
      .typed<_make_per_tensor_quantized_tensor_out::schema>();
}

// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_tensor_quantized_tensor_out::call(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__make_per_tensor_quantized_tensor_out_typed_handle();
    return op.call(self, scale, zero_point, out);
}

// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_tensor_quantized_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__make_per_tensor_quantized_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, out);
}

// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
// Resolves the typed operator handle by schema lookup in the global Dispatcher singleton;
// findSchemaOrThrow throws if the op/overload was never registered.
static C10_NOINLINE c10::TypedOperatorHandle<_make_per_channel_quantized_tensor_out::schema> create__make_per_channel_quantized_tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_make_per_channel_quantized_tensor_out::name, _make_per_channel_quantized_tensor_out::overload_name)
      .typed<_make_per_channel_quantized_tensor_out::schema>();
}

// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_channel_quantized_tensor_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {
    // The dispatcher handle is resolved once (thread-safe function-local static) and cached for all later calls.
    static auto op = create__make_per_channel_quantized_tensor_out_typed_handle();
    return op.call(self, scale, zero_point, axis, out);
}

// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_per_channel_quantized_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {
    // Same cached handle; redispatch forwards the caller-supplied DispatchKeySet instead of recomputing it.
    static auto op = create__make_per_channel_quantized_tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scale, zero_point, axis, out);
}

// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_tensor_affine_cachemask_out::schema> create_fake_quantize_per_tensor_affine_cachemask_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fake_quantize_per_tensor_affine_cachemask_out::name, fake_quantize_per_tensor_affine_cachemask_out::overload_name)
      .typed<fake_quantize_per_tensor_affine_cachemask_out::schema>();
}

// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out::call(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create_fake_quantize_per_tensor_affine_cachemask_out_typed_handle();
    return handle.call(self, scale, zero_point, quant_min, quant_max, out0, out1);
}

// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create_fake_quantize_per_tensor_affine_cachemask_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::schema> create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::name, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::overload_name)
      .typed<_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::schema>();
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_typed_handle();
    return handle.call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_tensor_affine_out::schema> create__fake_quantize_learnable_per_tensor_affine_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fake_quantize_learnable_per_tensor_affine_out::name, _fake_quantize_learnable_per_tensor_affine_out::overload_name)
      .typed<_fake_quantize_learnable_per_tensor_affine_out::schema>();
}

// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_tensor_affine_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__fake_quantize_learnable_per_tensor_affine_out_typed_handle();
    return handle.call(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}

// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_tensor_affine_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__fake_quantize_learnable_per_tensor_affine_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor, out);
}

// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<fake_quantize_per_channel_affine_cachemask_out::schema> create_fake_quantize_per_channel_affine_cachemask_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(fake_quantize_per_channel_affine_cachemask_out::name, fake_quantize_per_channel_affine_cachemask_out::overload_name)
      .typed<fake_quantize_per_channel_affine_cachemask_out::schema>();
}

// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create_fake_quantize_per_channel_affine_cachemask_out_typed_handle();
    return handle.call(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}

// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create_fake_quantize_per_channel_affine_cachemask_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
}

// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_fake_quantize_learnable_per_channel_affine_out::schema> create__fake_quantize_learnable_per_channel_affine_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fake_quantize_learnable_per_channel_affine_out::name, _fake_quantize_learnable_per_channel_affine_out::overload_name)
      .typed<_fake_quantize_learnable_per_channel_affine_out::schema>();
}

// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_channel_affine_out::call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__fake_quantize_learnable_per_channel_affine_out_typed_handle();
    return handle.call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}

// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fake_quantize_learnable_per_channel_affine_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__fake_quantize_learnable_per_channel_affine_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
}

// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
static C10_NOINLINE c10::TypedOperatorHandle<_fused_moving_avg_obs_fq_helper_out::schema> create__fused_moving_avg_obs_fq_helper_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fused_moving_avg_obs_fq_helper_out::name, _fused_moving_avg_obs_fq_helper_out::overload_name)
      .typed<_fused_moving_avg_obs_fq_helper_out::schema>();
}

// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__fused_moving_avg_obs_fq_helper_out_typed_handle();
    return handle.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}

// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__fused_moving_avg_obs_fq_helper_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
}

// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_moving_avg_obs_fq_helper_functional::schema> create__fused_moving_avg_obs_fq_helper_functional_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fused_moving_avg_obs_fq_helper_functional::name, _fused_moving_avg_obs_fq_helper_functional::overload_name)
      .typed<_fused_moving_avg_obs_fq_helper_functional::schema>();
}

// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional::call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__fused_moving_avg_obs_fq_helper_functional_typed_handle();
    return handle.call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__fused_moving_avg_obs_fq_helper_functional_typed_handle();
    return handle.redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
}

// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_to_copy_out::schema> create__to_copy_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_to_copy_out::name, _to_copy_out::overload_name)
      .typed<_to_copy_out::schema>();
}

// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_copy_out::call(const at::Tensor & self, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__to_copy_out_typed_handle();
    return handle.call(self, non_blocking, memory_format, out);
}

// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _to_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__to_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, non_blocking, memory_format, out);
}

// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))
static C10_NOINLINE c10::TypedOperatorHandle<_lstm_mps_out::schema> create__lstm_mps_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_lstm_mps_out::name, _lstm_mps_out::overload_name)
      .typed<_lstm_mps_out::schema>();
}

// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out::call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__lstm_mps_out_typed_handle();
    return handle.call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5);
}

// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__lstm_mps_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5);
}

// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<lstm_mps_backward_out::schema> create_lstm_mps_backward_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lstm_mps_backward_out::name, lstm_mps_backward_out::overload_name)
      .typed<lstm_mps_backward_out::schema>();
}

// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
void lstm_mps_backward_out::call(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create_lstm_mps_backward_out_typed_handle();
    return handle.call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
}

// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
void lstm_mps_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create_lstm_mps_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
}

// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_out::schema> create__thnn_fused_lstm_cell_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_lstm_cell_out::name, _thnn_fused_lstm_cell_out::overload_name)
      .typed<_thnn_fused_lstm_cell_out::schema>();
}

// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__thnn_fused_lstm_cell_out_typed_handle();
    return handle.call(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}

// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__thnn_fused_lstm_cell_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
}

// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_lstm_cell_backward_impl_out::schema> create__thnn_fused_lstm_cell_backward_impl_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_lstm_cell_backward_impl_out::name, _thnn_fused_lstm_cell_backward_impl_out::overload_name)
      .typed<_thnn_fused_lstm_cell_backward_impl_out::schema>();
}

// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out::call(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__thnn_fused_lstm_cell_backward_impl_out_typed_handle();
    return handle.call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
}

// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out::redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__thnn_fused_lstm_cell_backward_impl_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
}

// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell_out::schema> create__thnn_fused_gru_cell_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_gru_cell_out::name, _thnn_fused_gru_cell_out::overload_name)
      .typed<_thnn_fused_gru_cell_out::schema>();
}

// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out::call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__thnn_fused_gru_cell_out_typed_handle();
    return handle.call(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}

// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__thnn_fused_gru_cell_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
}

// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
static C10_NOINLINE c10::TypedOperatorHandle<_thnn_fused_gru_cell_backward_out::schema> create__thnn_fused_gru_cell_backward_out_typed_handle() {
  // Resolve the operator schema in the dispatcher and bind it to its typed signature.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_thnn_fused_gru_cell_backward_out::name, _thnn_fused_gru_cell_backward_out::overload_name)
      .typed<_thnn_fused_gru_cell_backward_out::schema>();
}

// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out::call(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Typed handle is resolved once on first use and cached for the process lifetime.
    static auto handle = create__thnn_fused_gru_cell_backward_out_typed_handle();
    return handle.call(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}

// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
    // Re-enter the dispatcher using the caller-provided dispatch key set.
    static auto handle = create__thnn_fused_gru_cell_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
}

// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_pack_padded_sequence_out::schema> create__pack_padded_sequence_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_pack_padded_sequence_out::name, _pack_padded_sequence_out::overload_name)
      .typed<_pack_padded_sequence_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out::call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create__pack_padded_sequence_out_typed_handle();
    return handle.call(input, lengths, batch_first, out0, out1);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
    static auto handle = create__pack_padded_sequence_out_typed_handle();
    return handle.redispatch(dispatchKeySet, input, lengths, batch_first, out0, out1);
}

// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage_out::schema> create_set_source_Storage_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_source_Storage_out::name, set_source_Storage_out::overload_name)
      .typed<set_source_Storage_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & set_source_Storage_out::call(const at::Tensor & self, at::Storage source, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_source_Storage_out_typed_handle();
    return handle.call(self, source, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & set_source_Storage_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out) {
    static auto handle = create_set_source_Storage_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, out);
}

// aten::set.source_Storage(Tensor self, Storage source) -> Tensor
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage::schema> create_set_source_Storage_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_source_Storage::name, set_source_Storage::overload_name)
      .typed<set_source_Storage::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor set_source_Storage::call(const at::Tensor & self, at::Storage source) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_source_Storage_typed_handle();
    return handle.call(self, source);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor set_source_Storage::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source) {
    static auto handle = create_set_source_Storage_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source);
}

// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage_storage_offset_out::schema> create_set_source_Storage_storage_offset_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_source_Storage_storage_offset_out::name, set_source_Storage_storage_offset_out::overload_name)
      .typed<set_source_Storage_storage_offset_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & set_source_Storage_storage_offset_out::call(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_source_Storage_storage_offset_out_typed_handle();
    return handle.call(self, source, storage_offset, size, stride, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & set_source_Storage_storage_offset_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
    static auto handle = create_set_source_Storage_storage_offset_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, storage_offset, size, stride, out);
}

// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Storage_storage_offset::schema> create_set_source_Storage_storage_offset_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_source_Storage_storage_offset::name, set_source_Storage_storage_offset::overload_name)
      .typed<set_source_Storage_storage_offset::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor set_source_Storage_storage_offset::call(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_source_Storage_storage_offset_typed_handle();
    return handle.call(self, source, storage_offset, size, stride);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor set_source_Storage_storage_offset::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
    static auto handle = create_set_source_Storage_storage_offset_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
}

// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Tensor_out::schema> create_set_source_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_source_Tensor_out::name, set_source_Tensor_out::overload_name)
      .typed<set_source_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & set_source_Tensor_out::call(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_source_Tensor_out_typed_handle();
    return handle.call(self, source, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & set_source_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
    static auto handle = create_set_source_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source, out);
}

// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_source_Tensor::schema> create_set_source_Tensor_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_source_Tensor::name, set_source_Tensor::overload_name)
      .typed<set_source_Tensor::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor set_source_Tensor::call(const at::Tensor & self, const at::Tensor & source) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_source_Tensor_typed_handle();
    return handle.call(self, source);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor set_source_Tensor::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source) {
    static auto handle = create_set_source_Tensor_typed_handle();
    return handle.redispatch(dispatchKeySet, self, source);
}

// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set_out::schema> create_set_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set_out::name, set_out::overload_name)
      .typed<set_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & set_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_out_typed_handle();
    return handle.call(self, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & set_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_set_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::set(Tensor self) -> Tensor
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<set::schema> create_set_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(set::name, set::overload_name)
      .typed<set::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor set::call(const at::Tensor & self) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_set_typed_handle();
    return handle.call(self);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor set::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
    static auto handle = create_set_typed_handle();
    return handle.redispatch(dispatchKeySet, self);
}

// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<lift_out::schema> create_lift_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lift_out::name, lift_out::overload_name)
      .typed<lift_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & lift_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_out_typed_handle();
    return handle.call(self, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & lift_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_lift_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<lift_fresh_copy_out::schema> create_lift_fresh_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(lift_fresh_copy_out::name, lift_fresh_copy_out::overload_name)
      .typed<lift_fresh_copy_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & lift_fresh_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_lift_fresh_copy_out_typed_handle();
    return handle.call(self, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & lift_fresh_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    static auto handle = create_lift_fresh_copy_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, out);
}

// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Scalar_out::schema> create_masked_fill_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(masked_fill_Scalar_out::name, masked_fill_Scalar_out::overload_name)
      .typed<masked_fill_Scalar_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & masked_fill_Scalar_out::call(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill_Scalar_out_typed_handle();
    return handle.call(self, mask, value, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & masked_fill_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
    static auto handle = create_masked_fill_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, value, out);
}

// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<masked_fill_Tensor_out::schema> create_masked_fill_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(masked_fill_Tensor_out::name, masked_fill_Tensor_out::overload_name)
      .typed<masked_fill_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & masked_fill_Tensor_out::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_fill_Tensor_out_typed_handle();
    return handle.call(self, mask, value, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & masked_fill_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
    static auto handle = create_masked_fill_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, value, out);
}

// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<masked_scatter_out::schema> create_masked_scatter_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(masked_scatter_out::name, masked_scatter_out::overload_name)
      .typed<masked_scatter_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & masked_scatter_out::call(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_masked_scatter_out_typed_handle();
    return handle.call(self, mask, source, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & masked_scatter_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
    static auto handle = create_masked_scatter_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, source, out);
}

// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax_out::schema> create__masked_softmax_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_masked_softmax_out::name, _masked_softmax_out::overload_name)
      .typed<_masked_softmax_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & _masked_softmax_out::call(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create__masked_softmax_out_typed_handle();
    return handle.call(self, mask, dim, mask_type, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & _masked_softmax_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type, at::Tensor & out) {
    static auto handle = create__masked_softmax_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, mask, dim, mask_type, out);
}

// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<_masked_softmax_backward_out::schema> create__masked_softmax_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_masked_softmax_backward_out::name, _masked_softmax_backward_out::overload_name)
      .typed<_masked_softmax_backward_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & _masked_softmax_backward_out::call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create__masked_softmax_backward_out_typed_handle();
    return handle.call(grad_output, output, mask, dim, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & _masked_softmax_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim, at::Tensor & out) {
    static auto handle = create__masked_softmax_backward_out_typed_handle();
    return handle.redispatch(dispatchKeySet, grad_output, output, mask, dim, out);
}

// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<put_out::schema> create_put_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(put_out::name, put_out::overload_name)
      .typed<put_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & put_out::call(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_put_out_typed_handle();
    return handle.call(self, index, source, accumulate, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & put_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
    static auto handle = create_put_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, index, source, accumulate, out);
}

// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Scalar_out::schema> create_index_fill_int_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(index_fill_int_Scalar_out::name, index_fill_int_Scalar_out::overload_name)
      .typed<index_fill_int_Scalar_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & index_fill_int_Scalar_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_index_fill_int_Scalar_out_typed_handle();
    return handle.call(self, dim, index, value, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & index_fill_int_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
    static auto handle = create_index_fill_int_Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, index, value, out);
}

// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<index_fill_int_Tensor_out::schema> create_index_fill_int_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(index_fill_int_Tensor_out::name, index_fill_int_Tensor_out::overload_name)
      .typed<index_fill_int_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & index_fill_int_Tensor_out::call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_index_fill_int_Tensor_out_typed_handle();
    return handle.call(self, dim, index, value, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & index_fill_int_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
    static auto handle = create_index_fill_int_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, dim, index, value, out);
}

// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_and_Scalar_Tensor_out::schema> create_bitwise_and_Scalar_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_and_Scalar_Tensor_out::name, bitwise_and_Scalar_Tensor_out::overload_name)
      .typed<bitwise_and_Scalar_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & bitwise_and_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_bitwise_and_Scalar_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & bitwise_and_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_and_Scalar_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_or_Scalar_Tensor_out::schema> create_bitwise_or_Scalar_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_or_Scalar_Tensor_out::name, bitwise_or_Scalar_Tensor_out::overload_name)
      .typed<bitwise_or_Scalar_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & bitwise_or_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_bitwise_or_Scalar_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & bitwise_or_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_or_Scalar_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_xor_Scalar_Tensor_out::schema> create_bitwise_xor_Scalar_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_xor_Scalar_Tensor_out::name, bitwise_xor_Scalar_Tensor_out::overload_name)
      .typed<bitwise_xor_Scalar_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & bitwise_xor_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_bitwise_xor_Scalar_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & bitwise_xor_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_xor_Scalar_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Scalar_out::schema> create___lshift___Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__lshift___Scalar_out::name, __lshift___Scalar_out::overload_name)
      .typed<__lshift___Scalar_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & __lshift___Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create___lshift___Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & __lshift___Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create___lshift___Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<__lshift___Tensor_out::schema> create___lshift___Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__lshift___Tensor_out::name, __lshift___Tensor_out::overload_name)
      .typed<__lshift___Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & __lshift___Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create___lshift___Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & __lshift___Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create___lshift___Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_left_shift_Scalar_Tensor_out::schema> create_bitwise_left_shift_Scalar_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_left_shift_Scalar_Tensor_out::name, bitwise_left_shift_Scalar_Tensor_out::overload_name)
      .typed<bitwise_left_shift_Scalar_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & bitwise_left_shift_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_bitwise_left_shift_Scalar_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & bitwise_left_shift_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_left_shift_Scalar_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Scalar_out::schema> create___rshift___Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__rshift___Scalar_out::name, __rshift___Scalar_out::overload_name)
      .typed<__rshift___Scalar_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & __rshift___Scalar_out::call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create___rshift___Scalar_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & __rshift___Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
    static auto handle = create___rshift___Scalar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<__rshift___Tensor_out::schema> create___rshift___Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(__rshift___Tensor_out::name, __rshift___Tensor_out::overload_name)
      .typed<__rshift___Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & __rshift___Tensor_out::call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create___rshift___Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & __rshift___Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create___rshift___Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
//
// Resolves the typed operator handle for this schema from the global dispatcher.
static C10_NOINLINE c10::TypedOperatorHandle<bitwise_right_shift_Scalar_Tensor_out::schema> create_bitwise_right_shift_Scalar_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(bitwise_right_shift_Scalar_Tensor_out::name, bitwise_right_shift_Scalar_Tensor_out::overload_name)
      .typed<bitwise_right_shift_Scalar_Tensor_out::schema>();
}

// Entry point: dispatches through the cached operator handle.
at::Tensor & bitwise_right_shift_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved once and cached for all subsequent calls.
    static auto handle = create_bitwise_right_shift_Scalar_Tensor_out_typed_handle();
    return handle.call(self, other, out);
}

// Redispatch variant: same as call(), but with an explicit dispatch key set.
at::Tensor & bitwise_right_shift_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    static auto handle = create_bitwise_right_shift_Scalar_Tensor_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<random_from_out::schema> create_random_from_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_from_out::name, random_from_out::overload_name)
      .typed<random_from_out::schema>();
}

// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_from_out::call(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_random_from_out_typed_handle();
    return op.call(self, from, to, generator, out);
}

// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_from_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_random_from_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator, out);
}

// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<random_from::schema> create_random_from_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_from::name, random_from::overload_name)
      .typed<random_from::schema>();
}

// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
at::Tensor random_from::call(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_random_from_typed_handle();
    return op.call(self, from, to, generator);
}

// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
at::Tensor random_from::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_random_from_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator);
}

// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<random_to_out::schema> create_random_to_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_to_out::name, random_to_out::overload_name)
      .typed<random_to_out::schema>();
}

// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_to_out::call(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_random_to_out_typed_handle();
    return op.call(self, to, generator, out);
}

// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_to_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_random_to_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, to, generator, out);
}

// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<random_to::schema> create_random_to_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_to::name, random_to::overload_name)
      .typed<random_to::schema>();
}

// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
at::Tensor random_to::call(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_random_to_typed_handle();
    return op.call(self, to, generator);
}

// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
at::Tensor random_to::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_random_to_typed_handle();
    return op.redispatch(dispatchKeySet, self, to, generator);
}

// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<random_out::schema> create_random_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random_out::name, random_out::overload_name)
      .typed<random_out::schema>();
}

// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_out::call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_random_out_typed_handle();
    return op.call(self, generator, out);
}

// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & random_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_random_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator, out);
}

// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<random::schema> create_random_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(random::name, random::overload_name)
      .typed<random::schema>();
}

// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor random::call(const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_random_typed_handle();
    return op.call(self, generator);
}

// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
at::Tensor random::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_random_typed_handle();
    return op.redispatch(dispatchKeySet, self, generator);
}

// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<uniform_out::schema> create_uniform_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(uniform_out::name, uniform_out::overload_name)
      .typed<uniform_out::schema>();
}

// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & uniform_out::call(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_uniform_out_typed_handle();
    return op.call(self, from, to, generator, out);
}

// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & uniform_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_uniform_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator, out);
}

// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<uniform::schema> create_uniform_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(uniform::name, uniform::overload_name)
      .typed<uniform::schema>();
}

// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
at::Tensor uniform::call(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_uniform_typed_handle();
    return op.call(self, from, to, generator);
}

// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
at::Tensor uniform::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_uniform_typed_handle();
    return op.redispatch(dispatchKeySet, self, from, to, generator);
}

// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<cauchy_out::schema> create_cauchy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cauchy_out::name, cauchy_out::overload_name)
      .typed<cauchy_out::schema>();
}

// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cauchy_out::call(const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_cauchy_out_typed_handle();
    return op.call(self, median, sigma, generator, out);
}

// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & cauchy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_cauchy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, median, sigma, generator, out);
}

// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<cauchy::schema> create_cauchy_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(cauchy::name, cauchy::overload_name)
      .typed<cauchy::schema>();
}

// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
at::Tensor cauchy::call(const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_cauchy_typed_handle();
    return op.call(self, median, sigma, generator);
}

// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
at::Tensor cauchy::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_cauchy_typed_handle();
    return op.redispatch(dispatchKeySet, self, median, sigma, generator);
}

// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_normal_out::schema> create_log_normal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_normal_out::name, log_normal_out::overload_name)
      .typed<log_normal_out::schema>();
}

// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_normal_out::call(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_log_normal_out_typed_handle();
    return op.call(self, mean, std, generator, out);
}

// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & log_normal_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_log_normal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator, out);
}

// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<log_normal::schema> create_log_normal_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(log_normal::name, log_normal::overload_name)
      .typed<log_normal::schema>();
}

// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
at::Tensor log_normal::call(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_log_normal_typed_handle();
    return op.call(self, mean, std, generator);
}

// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
at::Tensor log_normal::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_log_normal_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator);
}

// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<exponential_out::schema> create_exponential_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exponential_out::name, exponential_out::overload_name)
      .typed<exponential_out::schema>();
}

// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exponential_out::call(const at::Tensor & self, double lambd, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_exponential_out_typed_handle();
    return op.call(self, lambd, generator, out);
}

// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & exponential_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_exponential_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, generator, out);
}

// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<exponential::schema> create_exponential_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(exponential::name, exponential::overload_name)
      .typed<exponential::schema>();
}

// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
at::Tensor exponential::call(const at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_exponential_typed_handle();
    return op.call(self, lambd, generator);
}

// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
at::Tensor exponential::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_exponential_typed_handle();
    return op.redispatch(dispatchKeySet, self, lambd, generator);
}

// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<geometric_out::schema> create_geometric_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geometric_out::name, geometric_out::overload_name)
      .typed<geometric_out::schema>();
}

// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & geometric_out::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_geometric_out_typed_handle();
    return op.call(self, p, generator, out);
}

// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & geometric_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_geometric_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator, out);
}

// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<geometric::schema> create_geometric_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(geometric::name, geometric::overload_name)
      .typed<geometric::schema>();
}

// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor geometric::call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_geometric_typed_handle();
    return op.call(self, p, generator);
}

// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
at::Tensor geometric::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_geometric_typed_handle();
    return op.redispatch(dispatchKeySet, self, p, generator);
}

// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<tril_indices_out::schema> create_tril_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(tril_indices_out::name, tril_indices_out::overload_name)
      .typed<tril_indices_out::schema>();
}

// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_indices_out::call(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_tril_indices_out_typed_handle();
    return op.call(row, col, offset, out);
}

// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & tril_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_tril_indices_out_typed_handle();
    return op.redispatch(dispatchKeySet, row, col, offset, out);
}

// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<triu_indices_out::schema> create_triu_indices_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(triu_indices_out::name, triu_indices_out::overload_name)
      .typed<triu_indices_out::schema>();
}

// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_indices_out::call(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_triu_indices_out_typed_handle();
    return op.call(row, col, offset, out);
}

// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & triu_indices_out::redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_triu_indices_out_typed_handle();
    return op.redispatch(dispatchKeySet, row, col, offset, out);
}

// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<trace_out::schema> create_trace_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(trace_out::name, trace_out::overload_name)
      .typed<trace_out::schema>();
}

// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trace_out::call(const at::Tensor & self, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_trace_out_typed_handle();
    return op.call(self, out);
}

// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & trace_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_trace_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_cholesky_solve_helper_out::schema> create__cholesky_solve_helper_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_cholesky_solve_helper_out::name, _cholesky_solve_helper_out::overload_name)
      .typed<_cholesky_solve_helper_out::schema>();
}

// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cholesky_solve_helper_out::call(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create__cholesky_solve_helper_out_typed_handle();
    return op.call(self, A, upper, out);
}

// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _cholesky_solve_helper_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create__cholesky_solve_helper_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, A, upper, out);
}

// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<dist_out::schema> create_dist_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(dist_out::name, dist_out::overload_name)
      .typed<dist_out::schema>();
}

// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dist_out::call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_dist_out_typed_handle();
    return op.call(self, other, p, out);
}

// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & dist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_dist_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, p, out);
}

// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_bin_edges_out::schema> create__histogramdd_bin_edges_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_bin_edges_out::name, _histogramdd_bin_edges_out::overload_name)
      .typed<_histogramdd_bin_edges_out::schema>();
}

// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
void _histogramdd_bin_edges_out::call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::TensorList out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create__histogramdd_bin_edges_out_typed_handle();
    return op.call(self, bins, range, weight, density, out);
}

// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
void _histogramdd_bin_edges_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::TensorList out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create__histogramdd_bin_edges_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density, out);
}

// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_cts_out::schema> create__histogramdd_from_bin_cts_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_from_bin_cts_out::name, _histogramdd_from_bin_cts_out::overload_name)
      .typed<_histogramdd_from_bin_cts_out::schema>();
}

// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_cts_out::call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create__histogramdd_from_bin_cts_out_typed_handle();
    return op.call(self, bins, range, weight, density, out);
}

// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_cts_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create__histogramdd_from_bin_cts_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, range, weight, density, out);
}

// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<_histogramdd_from_bin_tensors_out::schema> create__histogramdd_from_bin_tensors_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_histogramdd_from_bin_tensors_out::name, _histogramdd_from_bin_tensors_out::overload_name)
      .typed<_histogramdd_from_bin_tensors_out::schema>();
}

// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_tensors_out::call(const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create__histogramdd_from_bin_tensors_out_typed_handle();
    return op.call(self, bins, weight, density, out);
}

// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _histogramdd_from_bin_tensors_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create__histogramdd_from_bin_tensors_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, bins, weight, density, out);
}

// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<remainder_Scalar_Tensor_out::schema> create_remainder_Scalar_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(remainder_Scalar_Tensor_out::name, remainder_Scalar_Tensor_out::overload_name)
      .typed<remainder_Scalar_Tensor_out::schema>();
}

// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_Tensor_out::call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_remainder_Scalar_Tensor_out_typed_handle();
    return op.call(self, other, out);
}

// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & remainder_Scalar_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_remainder_Scalar_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<unfold_backward_out::schema> create_unfold_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold_backward_out::name, unfold_backward_out::overload_name)
      .typed<unfold_backward_out::schema>();
}

// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unfold_backward_out::call(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_unfold_backward_out_typed_handle();
    return op.call(grad_in, input_sizes, dim, size, step, out);
}

// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unfold_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_unfold_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step, out);
}

// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
// Looks up this operator's schema in the dispatcher (throws if unregistered) and returns a typed handle.
static C10_NOINLINE c10::TypedOperatorHandle<normal_out::schema> create_normal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(normal_out::name, normal_out::overload_name)
      .typed<normal_out::schema>();
}

// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_out::call(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Handle is resolved lazily on first call and cached (thread-safe local static).
    static auto op = create_normal_out_typed_handle();
    return op.call(self, mean, std, generator, out);
}

// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & normal_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
    // Like call(), but dispatch resumes from the caller-supplied key set.
    static auto op = create_normal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, mean, std, generator, out);
}

// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
// Looks up aten::_amp_foreach_non_finite_check_and_unscale.out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_amp_foreach_non_finite_check_and_unscale_out::schema> create__amp_foreach_non_finite_check_and_unscale_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_amp_foreach_non_finite_check_and_unscale_out::name, _amp_foreach_non_finite_check_and_unscale_out::overload_name).typed<_amp_foreach_non_finite_check_and_unscale_out::schema>();
}

// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
void _amp_foreach_non_finite_check_and_unscale_out::call(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__amp_foreach_non_finite_check_and_unscale_out_typed_handle();
    return typed_handle.call(self, found_inf, inv_scale, out);
}

// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
void _amp_foreach_non_finite_check_and_unscale_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__amp_foreach_non_finite_check_and_unscale_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, found_inf, inv_scale, out);
}

// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
// Looks up aten::_amp_foreach_non_finite_check_and_unscale and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_amp_foreach_non_finite_check_and_unscale::schema> create__amp_foreach_non_finite_check_and_unscale_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_amp_foreach_non_finite_check_and_unscale::name, _amp_foreach_non_finite_check_and_unscale::overload_name).typed<_amp_foreach_non_finite_check_and_unscale::schema>();
}

// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale::call(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__amp_foreach_non_finite_check_and_unscale_typed_handle();
    return typed_handle.call(self, found_inf, inv_scale);
}

// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__amp_foreach_non_finite_check_and_unscale_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, found_inf, inv_scale);
}

// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
// Looks up aten::_amp_update_scale.out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_amp_update_scale_out::schema> create__amp_update_scale_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_amp_update_scale_out::name, _amp_update_scale_out::overload_name).typed<_amp_update_scale_out::schema>();
}

// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _amp_update_scale_out::call(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__amp_update_scale_out_typed_handle();
    return typed_handle.call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}

// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _amp_update_scale_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__amp_update_scale_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
}

// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
// Looks up aten::_amp_update_scale and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_amp_update_scale::schema> create__amp_update_scale_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_amp_update_scale::name, _amp_update_scale::overload_name).typed<_amp_update_scale::schema>();
}

// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
::std::tuple<at::Tensor,at::Tensor> _amp_update_scale::call(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__amp_update_scale_typed_handle();
    return typed_handle.call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
::std::tuple<at::Tensor,at::Tensor> _amp_update_scale::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__amp_update_scale_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
}

// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_add.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_Scalar_out::schema> create__foreach_add_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_Scalar_out::name, _foreach_add_Scalar_out::overload_name).typed<_foreach_add_Scalar_out::schema>();
}

// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_add_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_add_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_add_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_add_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_add.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_List_out::schema> create__foreach_add_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_List_out::name, _foreach_add_List_out::overload_name).typed<_foreach_add_List_out::schema>();
}

// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_add_List_out::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_add_List_out_typed_handle();
    return typed_handle.call(self, other, alpha, out);
}

// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_add_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_add_List_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_add.ScalarList_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_ScalarList_out::schema> create__foreach_add_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_ScalarList_out::name, _foreach_add_ScalarList_out::overload_name).typed<_foreach_add_ScalarList_out::schema>();
}

// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_add_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_add_ScalarList_out_typed_handle();
    return typed_handle.call(self, scalars, out);
}

// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_add_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_add_ScalarList_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_add.Tensor_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_add_Tensor_out::schema> create__foreach_add_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_add_Tensor_out::name, _foreach_add_Tensor_out::overload_name).typed<_foreach_add_Tensor_out::schema>();
}

// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_add_Tensor_out::call(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_add_Tensor_out_typed_handle();
    return typed_handle.call(self, other, alpha, out);
}

// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_add_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_add_Tensor_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_sub.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_Scalar_out::schema> create__foreach_sub_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub_Scalar_out::name, _foreach_sub_Scalar_out::overload_name).typed<_foreach_sub_Scalar_out::schema>();
}

// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_sub_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_sub_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_sub_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_sub_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_sub.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_List_out::schema> create__foreach_sub_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub_List_out::name, _foreach_sub_List_out::overload_name).typed<_foreach_sub_List_out::schema>();
}

// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_sub_List_out::call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_sub_List_out_typed_handle();
    return typed_handle.call(self, other, alpha, out);
}

// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
void _foreach_sub_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_sub_List_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, alpha, out);
}

// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_sub.ScalarList_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sub_ScalarList_out::schema> create__foreach_sub_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_sub_ScalarList_out::name, _foreach_sub_ScalarList_out::overload_name).typed<_foreach_sub_ScalarList_out::schema>();
}

// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_sub_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_sub_ScalarList_out_typed_handle();
    return typed_handle.call(self, scalars, out);
}

// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_sub_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_sub_ScalarList_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_mul.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_Scalar_out::schema> create__foreach_mul_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_Scalar_out::name, _foreach_mul_Scalar_out::overload_name).typed<_foreach_mul_Scalar_out::schema>();
}

// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_mul_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_mul_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_mul_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_mul_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_mul.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_List_out::schema> create__foreach_mul_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_List_out::name, _foreach_mul_List_out::overload_name).typed<_foreach_mul_List_out::schema>();
}

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_mul_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_mul_List_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_mul_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_mul_List_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_mul.ScalarList_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_ScalarList_out::schema> create__foreach_mul_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_ScalarList_out::name, _foreach_mul_ScalarList_out::overload_name).typed<_foreach_mul_ScalarList_out::schema>();
}

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_mul_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_mul_ScalarList_out_typed_handle();
    return typed_handle.call(self, scalars, out);
}

// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_mul_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_mul_ScalarList_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_mul.Tensor_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_mul_Tensor_out::schema> create__foreach_mul_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_mul_Tensor_out::name, _foreach_mul_Tensor_out::overload_name).typed<_foreach_mul_Tensor_out::schema>();
}

// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
void _foreach_mul_Tensor_out::call(at::TensorList self, const at::Tensor & other, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_mul_Tensor_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::_foreach_mul.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
void _foreach_mul_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_mul_Tensor_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_div.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_Scalar_out::schema> create__foreach_div_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_Scalar_out::name, _foreach_div_Scalar_out::overload_name).typed<_foreach_div_Scalar_out::schema>();
}

// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_div_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_div_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_div_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_div_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_div.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_List_out::schema> create__foreach_div_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_List_out::name, _foreach_div_List_out::overload_name).typed<_foreach_div_List_out::schema>();
}

// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_div_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_div_List_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_div_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_div_List_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_div.ScalarList_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_ScalarList_out::schema> create__foreach_div_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_ScalarList_out::name, _foreach_div_ScalarList_out::overload_name).typed<_foreach_div_ScalarList_out::schema>();
}

// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_div_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_div_ScalarList_out_typed_handle();
    return typed_handle.call(self, scalars, out);
}

// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_div_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_div_ScalarList_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_div.Tensor_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_div_Tensor_out::schema> create__foreach_div_Tensor_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_div_Tensor_out::name, _foreach_div_Tensor_out::overload_name).typed<_foreach_div_Tensor_out::schema>();
}

// aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
void _foreach_div_Tensor_out::call(at::TensorList self, const at::Tensor & other, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_div_Tensor_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()
void _foreach_div_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_div_Tensor_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_clamp_max.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_Scalar_out::schema> create__foreach_clamp_max_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max_Scalar_out::name, _foreach_clamp_max_Scalar_out::overload_name).typed<_foreach_clamp_max_Scalar_out::schema>();
}

// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_clamp_max_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_clamp_max_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_clamp_max.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_List_out::schema> create__foreach_clamp_max_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max_List_out::name, _foreach_clamp_max_List_out::overload_name).typed<_foreach_clamp_max_List_out::schema>();
}

// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_clamp_max_List_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_clamp_max_List_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_clamp_max.ScalarList_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_max_ScalarList_out::schema> create__foreach_clamp_max_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_max_ScalarList_out::name, _foreach_clamp_max_ScalarList_out::overload_name).typed<_foreach_clamp_max_ScalarList_out::schema>();
}

// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_clamp_max_ScalarList_out_typed_handle();
    return typed_handle.call(self, scalars, out);
}

// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_max_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_clamp_max_ScalarList_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_clamp_min.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_Scalar_out::schema> create__foreach_clamp_min_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_min_Scalar_out::name, _foreach_clamp_min_Scalar_out::overload_name).typed<_foreach_clamp_min_Scalar_out::schema>();
}

// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_clamp_min_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_clamp_min_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_clamp_min.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_List_out::schema> create__foreach_clamp_min_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_min_List_out::name, _foreach_clamp_min_List_out::overload_name).typed<_foreach_clamp_min_List_out::schema>();
}

// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_clamp_min_List_out_typed_handle();
    return typed_handle.call(self, other, out);
}

// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_clamp_min_List_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_clamp_min.ScalarList_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_clamp_min_ScalarList_out::schema> create__foreach_clamp_min_ScalarList_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_clamp_min_ScalarList_out::name, _foreach_clamp_min_ScalarList_out::overload_name).typed<_foreach_clamp_min_ScalarList_out::schema>();
}

// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_clamp_min_ScalarList_out_typed_handle();
    return typed_handle.call(self, scalars, out);
}

// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_clamp_min_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_clamp_min_ScalarList_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_maximum.Scalar_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_Scalar_out::schema> create__foreach_maximum_Scalar_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_maximum_Scalar_out::name, _foreach_maximum_Scalar_out::overload_name).typed<_foreach_maximum_Scalar_out::schema>();
}

// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Resolve the typed operator handle once (function-local static caches it) and dispatch.
    static const auto typed_handle = create__foreach_maximum_Scalar_out_typed_handle();
    return typed_handle.call(self, scalar, out);
}

// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set through the cached typed handle.
    static const auto typed_handle = create__foreach_maximum_Scalar_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
// Looks up aten::_foreach_maximum.List_out and binds it to its schema type.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_List_out::schema> create__foreach_maximum_List_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_foreach_maximum_List_out::name, _foreach_maximum_List_out::overload_name).typed<_foreach_maximum_List_out::schema>();
}

// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {
    
    static auto op = create__foreach_maximum_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    
    static auto op = create__foreach_maximum_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_maximum_ScalarList_out::schema> create__foreach_maximum_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_maximum_ScalarList_out::name, _foreach_maximum_ScalarList_out::overload_name)
      .typed<_foreach_maximum_ScalarList_out::schema>();
}

// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_maximum_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_maximum_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_maximum_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_Scalar_out::schema> create__foreach_minimum_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_Scalar_out::name, _foreach_minimum_Scalar_out::overload_name)
      .typed<_foreach_minimum_Scalar_out::schema>();
}

// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_Scalar_out::call(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_minimum_Scalar_out_typed_handle();
    return op.call(self, scalar, out);
}

// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_minimum_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalar, out);
}

// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_List_out::schema> create__foreach_minimum_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_List_out::name, _foreach_minimum_List_out::overload_name)
      .typed<_foreach_minimum_List_out::schema>();
}

// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_List_out::call(at::TensorList self, at::TensorList other, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_minimum_List_out_typed_handle();
    return op.call(self, other, out);
}

// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_minimum_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, other, out);
}

// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_minimum_ScalarList_out::schema> create__foreach_minimum_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_minimum_ScalarList_out::name, _foreach_minimum_ScalarList_out::overload_name)
      .typed<_foreach_minimum_ScalarList_out::schema>();
}

// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_minimum_ScalarList_out_typed_handle();
    return op.call(self, scalars, out);
}

// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_minimum_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_minimum_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, scalars, out);
}

// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Scalar_out::schema> create__foreach_addcdiv_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv_Scalar_out::name, _foreach_addcdiv_Scalar_out::overload_name)
      .typed<_foreach_addcdiv_Scalar_out::schema>();
}

// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Scalar_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_addcdiv_Scalar_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_addcdiv_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_ScalarList_out::schema> create__foreach_addcdiv_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv_ScalarList_out::name, _foreach_addcdiv_ScalarList_out::overload_name)
      .typed<_foreach_addcdiv_ScalarList_out::schema>();
}

// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_ScalarList_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_addcdiv_ScalarList_out_typed_handle();
    return op.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_addcdiv_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcdiv_Tensor_out::schema> create__foreach_addcdiv_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcdiv_Tensor_out::name, _foreach_addcdiv_Tensor_out::overload_name)
      .typed<_foreach_addcdiv_Tensor_out::schema>();
}

// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Tensor_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_addcdiv_Tensor_out_typed_handle();
    return op.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcdiv_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_addcdiv_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Scalar_out::schema> create__foreach_addcmul_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Scalar_out::name, _foreach_addcmul_Scalar_out::overload_name)
      .typed<_foreach_addcmul_Scalar_out::schema>();
}

// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Scalar_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_addcmul_Scalar_out_typed_handle();
    return op.call(self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_addcmul_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, value, out);
}

// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_ScalarList_out::schema> create__foreach_addcmul_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_ScalarList_out::name, _foreach_addcmul_ScalarList_out::overload_name)
      .typed<_foreach_addcmul_ScalarList_out::schema>();
}

// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_ScalarList_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_addcmul_ScalarList_out_typed_handle();
    return op.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_addcmul_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_addcmul_Tensor_out::schema> create__foreach_addcmul_Tensor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_addcmul_Tensor_out::name, _foreach_addcmul_Tensor_out::overload_name)
      .typed<_foreach_addcmul_Tensor_out::schema>();
}

// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Tensor_out::call(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_addcmul_Tensor_out_typed_handle();
    return op.call(self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
void _foreach_addcmul_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_addcmul_Tensor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out);
}

// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_abs_out::schema> create__foreach_abs_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_abs_out::name, _foreach_abs_out::overload_name)
      .typed<_foreach_abs_out::schema>();
}

// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_abs_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_abs_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_abs_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_abs_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_acos_out::schema> create__foreach_acos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_acos_out::name, _foreach_acos_out::overload_name)
      .typed<_foreach_acos_out::schema>();
}

// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_acos_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_acos_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_acos_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_acos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_asin_out::schema> create__foreach_asin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_asin_out::name, _foreach_asin_out::overload_name)
      .typed<_foreach_asin_out::schema>();
}

// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_asin_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_asin_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_asin_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_asin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_atan_out::schema> create__foreach_atan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_atan_out::name, _foreach_atan_out::overload_name)
      .typed<_foreach_atan_out::schema>();
}

// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_atan_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_atan_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_atan_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_atan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_ceil_out::schema> create__foreach_ceil_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_ceil_out::name, _foreach_ceil_out::overload_name)
      .typed<_foreach_ceil_out::schema>();
}

// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_ceil_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_ceil_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_ceil_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_ceil_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cos_out::schema> create__foreach_cos_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cos_out::name, _foreach_cos_out::overload_name)
      .typed<_foreach_cos_out::schema>();
}

// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cos_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_cos_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cos_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_cos_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_cosh_out::schema> create__foreach_cosh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_cosh_out::name, _foreach_cosh_out::overload_name)
      .typed<_foreach_cosh_out::schema>();
}

// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cosh_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_cosh_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_cosh_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_cosh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erf_out::schema> create__foreach_erf_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erf_out::name, _foreach_erf_out::overload_name)
      .typed<_foreach_erf_out::schema>();
}

// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erf_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_erf_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erf_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_erf_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_erfc_out::schema> create__foreach_erfc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_erfc_out::name, _foreach_erfc_out::overload_name)
      .typed<_foreach_erfc_out::schema>();
}

// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erfc_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_erfc_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_erfc_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_erfc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_exp_out::schema> create__foreach_exp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_exp_out::name, _foreach_exp_out::overload_name)
      .typed<_foreach_exp_out::schema>();
}

// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_exp_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_exp_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_exp_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_exp_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_expm1_out::schema> create__foreach_expm1_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_expm1_out::name, _foreach_expm1_out::overload_name)
      .typed<_foreach_expm1_out::schema>();
}

// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_expm1_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_expm1_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_expm1_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_expm1_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_floor_out::schema> create__foreach_floor_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_floor_out::name, _foreach_floor_out::overload_name)
      .typed<_foreach_floor_out::schema>();
}

// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_floor_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_floor_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_floor_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_floor_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_frac_out::schema> create__foreach_frac_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_frac_out::name, _foreach_frac_out::overload_name)
      .typed<_foreach_frac_out::schema>();
}

// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_frac_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_frac_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_frac_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_frac_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_List_out::schema> create__foreach_lerp_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_List_out::name, _foreach_lerp_List_out::overload_name)
      .typed<_foreach_lerp_List_out::schema>();
}

// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_List_out::call(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_lerp_List_out_typed_handle();
    return op.call(self, tensors1, weights, out);
}

// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_lerp_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weights, out);
}

// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_Scalar_out::schema> create__foreach_lerp_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_Scalar_out::name, _foreach_lerp_Scalar_out::overload_name)
      .typed<_foreach_lerp_Scalar_out::schema>();
}

// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_Scalar_out::call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_lerp_Scalar_out_typed_handle();
    return op.call(self, tensors1, weight, out);
}

// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_lerp_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight, out);
}

// aten::_foreach_lerp.ScalarList_out(Tensor[] self, Tensor[] tensors1, Scalar[] weight, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lerp_ScalarList_out::schema> create__foreach_lerp_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lerp_ScalarList_out::name, _foreach_lerp_ScalarList_out::overload_name)
      .typed<_foreach_lerp_ScalarList_out::schema>();
}

// aten::_foreach_lerp.ScalarList_out(Tensor[] self, Tensor[] tensors1, Scalar[] weight, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_ScalarList_out::call(at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_lerp_ScalarList_out_typed_handle();
    return op.call(self, tensors1, weight, out);
}

// aten::_foreach_lerp.ScalarList_out(Tensor[] self, Tensor[] tensors1, Scalar[] weight, *, Tensor(a!)[] out) -> ()
void _foreach_lerp_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_lerp_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, tensors1, weight, out);
}

// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_lgamma_out::schema> create__foreach_lgamma_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_lgamma_out::name, _foreach_lgamma_out::overload_name)
      .typed<_foreach_lgamma_out::schema>();
}

// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_lgamma_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_lgamma_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_lgamma_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_lgamma_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log_out::schema> create__foreach_log_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log_out::name, _foreach_log_out::overload_name)
      .typed<_foreach_log_out::schema>();
}

// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_log_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_log_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log10_out::schema> create__foreach_log10_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log10_out::name, _foreach_log10_out::overload_name)
      .typed<_foreach_log10_out::schema>();
}

// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log10_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_log10_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log10_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_log10_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Looks up this operator's typed handle in the global dispatcher; findSchemaOrThrow throws if the schema is not registered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log1p_out::schema> create__foreach_log1p_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log1p_out::name, _foreach_log1p_out::overload_name)
      .typed<_foreach_log1p_out::schema>();
}

// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log1p_out::call(at::TensorList self, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached for all subsequent calls.
    static auto op = create__foreach_log1p_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log1p_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    // Re-enter the dispatcher with an explicit key set so dispatch can continue past keys already handled.
    static auto op = create__foreach_log1p_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_log2_out::schema> create__foreach_log2_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_log2_out::name, _foreach_log2_out::overload_name)
      .typed<_foreach_log2_out::schema>();
}

// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log2_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_log2_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_log2_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_log2_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_max_out::schema> create__foreach_max_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_max_out::name, _foreach_max_out::overload_name)
      .typed<_foreach_max_out::schema>();
}

// aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_max_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_max_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_max.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_max_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_max_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_neg_out::schema> create__foreach_neg_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_neg_out::name, _foreach_neg_out::overload_name)
      .typed<_foreach_neg_out::schema>();
}

// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_neg_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_neg_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_neg_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_neg_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_norm_Scalar_out::schema> create__foreach_norm_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_norm_Scalar_out::name, _foreach_norm_Scalar_out::overload_name)
      .typed<_foreach_norm_Scalar_out::schema>();
}

// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
void _foreach_norm_Scalar_out::call(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_norm_Scalar_out_typed_handle();
    return op.call(self, ord, dtype, out);
}

// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, ScalarType? dtype=None, *, Tensor(a!)[] out) -> ()
void _foreach_norm_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_norm_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, ord, dtype, out);
}

// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_List_out::schema> create__foreach_pow_List_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_List_out::name, _foreach_pow_List_out::overload_name)
      .typed<_foreach_pow_List_out::schema>();
}

// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_List_out::call(at::TensorList self, at::TensorList exponent, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_pow_List_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_List_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_pow_List_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_Scalar_out::schema> create__foreach_pow_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_Scalar_out::name, _foreach_pow_Scalar_out::overload_name)
      .typed<_foreach_pow_Scalar_out::schema>();
}

// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_Scalar_out::call(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_pow_Scalar_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_pow_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_pow_ScalarList_out::schema> create__foreach_pow_ScalarList_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_pow_ScalarList_out::name, _foreach_pow_ScalarList_out::overload_name)
      .typed<_foreach_pow_ScalarList_out::schema>();
}

// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_ScalarList_out::call(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_pow_ScalarList_out_typed_handle();
    return op.call(self, exponent, out);
}

// aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
void _foreach_pow_ScalarList_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_pow_ScalarList_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, exponent, out);
}

// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_reciprocal_out::schema> create__foreach_reciprocal_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_reciprocal_out::name, _foreach_reciprocal_out::overload_name)
      .typed<_foreach_reciprocal_out::schema>();
}

// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_reciprocal_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_reciprocal_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_reciprocal_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_reciprocal_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_round_out::schema> create__foreach_round_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_round_out::name, _foreach_round_out::overload_name)
      .typed<_foreach_round_out::schema>();
}

// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_round_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_round_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_round_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_round_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_rsqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_rsqrt_out::schema> create__foreach_rsqrt_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_rsqrt_out::name, _foreach_rsqrt_out::overload_name)
      .typed<_foreach_rsqrt_out::schema>();
}

// aten::_foreach_rsqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_rsqrt_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_rsqrt_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_rsqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_rsqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_rsqrt_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sigmoid_out::schema> create__foreach_sigmoid_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sigmoid_out::name, _foreach_sigmoid_out::overload_name)
      .typed<_foreach_sigmoid_out::schema>();
}

// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sigmoid_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_sigmoid_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sigmoid_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_sigmoid_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sign_out::schema> create__foreach_sign_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sign_out::name, _foreach_sign_out::overload_name)
      .typed<_foreach_sign_out::schema>();
}

// aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sign_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_sign_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_sign.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sign_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_sign_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sin_out::schema> create__foreach_sin_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sin_out::name, _foreach_sin_out::overload_name)
      .typed<_foreach_sin_out::schema>();
}

// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sin_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_sin_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sin_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_sin_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sinh_out::schema> create__foreach_sinh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sinh_out::name, _foreach_sinh_out::overload_name)
      .typed<_foreach_sinh_out::schema>();
}

// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sinh_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_sinh_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sinh_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_sinh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_sqrt_out::schema> create__foreach_sqrt_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_sqrt_out::name, _foreach_sqrt_out::overload_name)
      .typed<_foreach_sqrt_out::schema>();
}

// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sqrt_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_sqrt_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_sqrt_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_sqrt_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tan_out::schema> create__foreach_tan_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tan_out::name, _foreach_tan_out::overload_name)
      .typed<_foreach_tan_out::schema>();
}

// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tan_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_tan_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tan_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_tan_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_tanh_out::schema> create__foreach_tanh_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_tanh_out::name, _foreach_tanh_out::overload_name)
      .typed<_foreach_tanh_out::schema>();
}

// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tanh_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_tanh_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_tanh_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_tanh_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_trunc_out::schema> create__foreach_trunc_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_trunc_out::name, _foreach_trunc_out::overload_name)
      .typed<_foreach_trunc_out::schema>();
}

// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_trunc_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_trunc_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_trunc_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_trunc_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_zero_out::schema> create__foreach_zero_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_zero_out::name, _foreach_zero_out::overload_name)
      .typed<_foreach_zero_out::schema>();
}

// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_zero_out::call(at::TensorList self, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_zero_out_typed_handle();
    return op.call(self, out);
}

// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
void _foreach_zero_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_zero_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_zero::schema> create__foreach_zero_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_zero::name, _foreach_zero::overload_name)
      .typed<_foreach_zero::schema>();
}

// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
// Functional variant: returns new tensors instead of writing into an out list.
::std::vector<at::Tensor> _foreach_zero::call(at::TensorList self) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_zero_typed_handle();
    return op.call(self);
}

// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
::std::vector<at::Tensor> _foreach_zero::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_zero_typed_handle();
    return op.redispatch(dispatchKeySet, self);
}

// aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_foreach_copy_out::schema> create__foreach_copy_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_foreach_copy_out::name, _foreach_copy_out::overload_name)
      .typed<_foreach_copy_out::schema>();
}

// aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()
void _foreach_copy_out::call(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__foreach_copy_out_typed_handle();
    return op.call(self, src, non_blocking, out);
}

// aten::_foreach_copy.out(Tensor[] self, Tensor[] src, bool non_blocking=False, *, Tensor(a!)[] out) -> ()
void _foreach_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__foreach_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, src, non_blocking, out);
}

// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<bucketize_Scalar_out::schema> create_bucketize_Scalar_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(bucketize_Scalar_out::name, bucketize_Scalar_out::overload_name)
      .typed<bucketize_Scalar_out::schema>();
}

// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bucketize_Scalar_out::call(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_bucketize_Scalar_out_typed_handle();
    return op.call(self, boundaries, out_int32, right, out);
}

// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & bucketize_Scalar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_bucketize_Scalar_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
}

// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<glu_jvp_out::schema> create_glu_jvp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_jvp_out::name, glu_jvp_out::overload_name)
      .typed<glu_jvp_out::schema>();
}

// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_jvp_out::call(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_glu_jvp_out_typed_handle();
    return op.call(glu, x, dx, dim, out);
}

// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_jvp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_glu_jvp_out_typed_handle();
    return op.redispatch(dispatchKeySet, glu, x, dx, dim, out);
}

// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<glu_backward_jvp_out::schema> create_glu_backward_jvp_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(glu_backward_jvp_out::name, glu_backward_jvp_out::overload_name)
      .typed<glu_backward_jvp_out::schema>();
}

// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_backward_jvp_out::call(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_glu_backward_jvp_out_typed_handle();
    return op.call(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}

// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & glu_backward_jvp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_glu_backward_jvp_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
}

// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<hardswish_backward_out::schema> create_hardswish_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(hardswish_backward_out::name, hardswish_backward_out::overload_name)
      .typed<hardswish_backward_out::schema>();
}

// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardswish_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_hardswish_backward_out_typed_handle();
    return op.call(grad_output, self, out);
}

// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & hardswish_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_hardswish_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, out);
}

// aten::rrelu_with_noise_functional(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> (Tensor, Tensor noise_out)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_functional::schema> create_rrelu_with_noise_functional_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_functional::name, rrelu_with_noise_functional::overload_name)
      .typed<rrelu_with_noise_functional::schema>();
}

// aten::rrelu_with_noise_functional(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> (Tensor, Tensor noise_out)
::std::tuple<at::Tensor,at::Tensor> rrelu_with_noise_functional::call(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_rrelu_with_noise_functional_typed_handle();
    return op.call(self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise_functional(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> (Tensor, Tensor noise_out)
::std::tuple<at::Tensor,at::Tensor> rrelu_with_noise_functional::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_rrelu_with_noise_functional_typed_handle();
    return op.redispatch(dispatchKeySet, self, noise, lower, upper, training, generator);
}

// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<rrelu_with_noise_backward_out::schema> create_rrelu_with_noise_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(rrelu_with_noise_backward_out::name, rrelu_with_noise_backward_out::overload_name)
      .typed<rrelu_with_noise_backward_out::schema>();
}

// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rrelu_with_noise_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_rrelu_with_noise_backward_out_typed_handle();
    return op.call(grad_output, self, noise, lower, upper, training, self_is_result, out);
}

// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & rrelu_with_noise_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_rrelu_with_noise_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result, out);
}

// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<mkldnn_adaptive_avg_pool2d_backward_out::schema> create_mkldnn_adaptive_avg_pool2d_backward_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(mkldnn_adaptive_avg_pool2d_backward_out::name, mkldnn_adaptive_avg_pool2d_backward_out::overload_name)
      .typed<mkldnn_adaptive_avg_pool2d_backward_out::schema>();
}

// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create_mkldnn_adaptive_avg_pool2d_backward_out_typed_handle();
    return op.call(grad_output, self, out);
}

// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create_mkldnn_adaptive_avg_pool2d_backward_out_typed_handle();
    return op.redispatch(dispatchKeySet, grad_output, self, out);
}

// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Resolves this op's registered schema once via the dispatcher; throws if unregistered.
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d_out::schema> create__adaptive_avg_pool2d_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_adaptive_avg_pool2d_out::name, _adaptive_avg_pool2d_out::overload_name)
      .typed<_adaptive_avg_pool2d_out::schema>();
}

// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool2d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    // Handle is resolved lazily and cached in a thread-safe function-local static.
    static auto op = create__adaptive_avg_pool2d_out_typed_handle();
    return op.call(self, output_size, out);
}

// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    
    // Same as call(), but dispatches using the caller-supplied dispatch key set.
    static auto op = create__adaptive_avg_pool2d_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, output_size, out);
}

// aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool2d_backward_out::schema> create__adaptive_avg_pool2d_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_adaptive_avg_pool2d_backward_out::name, _adaptive_avg_pool2d_backward_out::overload_name)
      .typed<_adaptive_avg_pool2d_backward_out::schema>();
}

// aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool2d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__adaptive_avg_pool2d_backward_out_typed_handle();
    return typed_handle.call(grad_output, self, out);
}

// aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool2d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__adaptive_avg_pool2d_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_output, self, out);
}

// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d_out::schema> create__adaptive_avg_pool3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_adaptive_avg_pool3d_out::name, _adaptive_avg_pool3d_out::overload_name)
      .typed<_adaptive_avg_pool3d_out::schema>();
}

// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_out::call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__adaptive_avg_pool3d_out_typed_handle();
    return typed_handle.call(self, output_size, out);
}

// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__adaptive_avg_pool3d_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, output_size, out);
}

// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_adaptive_avg_pool3d_backward_out::schema> create__adaptive_avg_pool3d_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_adaptive_avg_pool3d_backward_out::name, _adaptive_avg_pool3d_backward_out::overload_name)
      .typed<_adaptive_avg_pool3d_backward_out::schema>();
}

// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_backward_out::call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__adaptive_avg_pool3d_backward_out_typed_handle();
    return typed_handle.call(grad_output, self, out);
}

// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _adaptive_avg_pool3d_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__adaptive_avg_pool3d_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_output, self, out);
}

// aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_bilinear2d_vec_out::schema> create_upsample_bilinear2d_vec_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_bilinear2d_vec_out::name, upsample_bilinear2d_vec_out::overload_name)
      .typed<upsample_bilinear2d_vec_out::schema>();
}

// aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_vec_out::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_upsample_bilinear2d_vec_out_typed_handle();
    return typed_handle.call(input, output_size, align_corners, scale_factors, out);
}

// aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_bilinear2d_vec_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_upsample_bilinear2d_vec_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out);
}

// aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<upsample_nearest2d_vec_out::schema> create_upsample_nearest2d_vec_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(upsample_nearest2d_vec_out::name, upsample_nearest2d_vec_out::overload_name)
      .typed<upsample_nearest2d_vec_out::schema>();
}

// aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest2d_vec_out::call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_upsample_nearest2d_vec_out_typed_handle();
    return typed_handle.call(input, output_size, scale_factors, out);
}

// aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & upsample_nearest2d_vec_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_upsample_nearest2d_vec_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, input, output_size, scale_factors, out);
}

// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_slow_conv2d_backward_output_mask_out::schema> create__slow_conv2d_backward_output_mask_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_slow_conv2d_backward_output_mask_out::name, _slow_conv2d_backward_output_mask_out::overload_name)
      .typed<_slow_conv2d_backward_output_mask_out::schema>();
}

// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_output_mask_out::call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__slow_conv2d_backward_output_mask_out_typed_handle();
    return typed_handle.call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}

// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_output_mask_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__slow_conv2d_backward_output_mask_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
}

// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<conv_depthwise3d_out::schema> create_conv_depthwise3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(conv_depthwise3d_out::name, conv_depthwise3d_out::overload_name)
      .typed<conv_depthwise3d_out::schema>();
}

// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conv_depthwise3d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_conv_depthwise3d_out_typed_handle();
    return typed_handle.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::conv_depthwise3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias, SymInt[3] stride, SymInt[3] padding, SymInt[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & conv_depthwise3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_conv_depthwise3d_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated2d_out::schema> create_slow_conv_dilated2d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv_dilated2d_out::name, slow_conv_dilated2d_out::overload_name)
      .typed<slow_conv_dilated2d_out::schema>();
}

// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_dilated2d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_slow_conv_dilated2d_out_typed_handle();
    return typed_handle.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_dilated2d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_slow_conv_dilated2d_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<slow_conv_dilated3d_out::schema> create_slow_conv_dilated3d_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(slow_conv_dilated3d_out::name, slow_conv_dilated3d_out::overload_name)
      .typed<slow_conv_dilated3d_out::schema>();
}

// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_dilated3d_out::call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_slow_conv_dilated3d_out_typed_handle();
    return typed_handle.call(self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slow_conv_dilated3d_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_slow_conv_dilated3d_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
}

// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<isinf_out::schema> create_isinf_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(isinf_out::name, isinf_out::overload_name)
      .typed<isinf_out::schema>();
}

// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isinf_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_isinf_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & isinf_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_isinf_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<linalg_matrix_exp_out::schema> create_linalg_matrix_exp_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(linalg_matrix_exp_out::name, linalg_matrix_exp_out::overload_name)
      .typed<linalg_matrix_exp_out::schema>();
}

// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_exp_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_linalg_matrix_exp_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & linalg_matrix_exp_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_linalg_matrix_exp_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_intlist_out::schema> create__test_optional_intlist_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_optional_intlist_out::name, _test_optional_intlist_out::overload_name)
      .typed<_test_optional_intlist_out::schema>();
}

// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_intlist_out::call(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__test_optional_intlist_out_typed_handle();
    return typed_handle.call(values, addends, out);
}

// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_intlist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__test_optional_intlist_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, values, addends, out);
}

// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_filled_intlist_out::schema> create__test_optional_filled_intlist_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_optional_filled_intlist_out::name, _test_optional_filled_intlist_out::overload_name)
      .typed<_test_optional_filled_intlist_out::schema>();
}

// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_filled_intlist_out::call(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__test_optional_filled_intlist_out_typed_handle();
    return typed_handle.call(values, addends, out);
}

// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_filled_intlist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__test_optional_filled_intlist_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, values, addends, out);
}

// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_optional_floatlist_out::schema> create__test_optional_floatlist_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_optional_floatlist_out::name, _test_optional_floatlist_out::overload_name)
      .typed<_test_optional_floatlist_out::schema>();
}

// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_floatlist_out::call(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__test_optional_floatlist_out_typed_handle();
    return typed_handle.call(values, addends, out);
}

// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_optional_floatlist_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__test_optional_floatlist_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, values, addends, out);
}

// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_warn_in_autograd_out::schema> create__test_warn_in_autograd_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_warn_in_autograd_out::name, _test_warn_in_autograd_out::overload_name)
      .typed<_test_warn_in_autograd_out::schema>();
}

// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_warn_in_autograd_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__test_warn_in_autograd_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_warn_in_autograd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__test_warn_in_autograd_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_fullcoverage_out::schema> create__test_autograd_multiple_dispatch_fullcoverage_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_autograd_multiple_dispatch_fullcoverage_out::name, _test_autograd_multiple_dispatch_fullcoverage_out::overload_name)
      .typed<_test_autograd_multiple_dispatch_fullcoverage_out::schema>();
}

// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_fullcoverage_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__test_autograd_multiple_dispatch_fullcoverage_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_fullcoverage_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__test_autograd_multiple_dispatch_fullcoverage_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_test_autograd_multiple_dispatch_view_copy_out::schema> create__test_autograd_multiple_dispatch_view_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_test_autograd_multiple_dispatch_view_copy_out::name, _test_autograd_multiple_dispatch_view_copy_out::overload_name)
      .typed<_test_autograd_multiple_dispatch_view_copy_out::schema>();
}

// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_view_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__test_autograd_multiple_dispatch_view_copy_out_typed_handle();
    return typed_handle.call(self, out);
}

// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _test_autograd_multiple_dispatch_view_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__test_autograd_multiple_dispatch_view_copy_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, out);
}

// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<segment_reduce_out::schema> create_segment_reduce_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(segment_reduce_out::name, segment_reduce_out::overload_name)
      .typed<segment_reduce_out::schema>();
}

// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & segment_reduce_out::call(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create_segment_reduce_out_typed_handle();
    return typed_handle.call(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}

// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & segment_reduce_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create_segment_reduce_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
}

// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_segment_reduce_backward_out::schema> create__segment_reduce_backward_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_segment_reduce_backward_out::name, _segment_reduce_backward_out::overload_name)
      .typed<_segment_reduce_backward_out::schema>();
}

// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _segment_reduce_backward_out::call(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & offsets, int64_t axis, const ::std::optional<at::Scalar> & initial, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__segment_reduce_backward_out_typed_handle();
    return typed_handle.call(grad, output, data, reduce, lengths, offsets, axis, initial, out);
}

// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _segment_reduce_backward_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & offsets, int64_t axis, const ::std::optional<at::Scalar> & initial, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__segment_reduce_backward_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial, out);
}

// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_nested_tensor_from_tensor_list_out::schema> create__nested_tensor_from_tensor_list_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_nested_tensor_from_tensor_list_out::name, _nested_tensor_from_tensor_list_out::overload_name)
      .typed<_nested_tensor_from_tensor_list_out::schema>();
}

// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_from_tensor_list_out::call(at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__nested_tensor_from_tensor_list_out_typed_handle();
    return typed_handle.call(list, dtype, layout, device, pin_memory, out);
}

// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _nested_tensor_from_tensor_list_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__nested_tensor_from_tensor_list_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory, out);
}

// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_fw_primal_copy_out::schema> create__fw_primal_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_fw_primal_copy_out::name, _fw_primal_copy_out::overload_name)
      .typed<_fw_primal_copy_out::schema>();
}

// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fw_primal_copy_out::call(const at::Tensor & self, int64_t level, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__fw_primal_copy_out_typed_handle();
    return typed_handle.call(self, level, out);
}

// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _fw_primal_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__fw_primal_copy_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, self, level, out);
}

// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<_make_dual_copy_out::schema> create__make_dual_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(_make_dual_copy_out::name, _make_dual_copy_out::overload_name)
      .typed<_make_dual_copy_out::schema>();
}

// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_dual_copy_out::call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
    // Typed dispatcher handle is resolved once and cached in a function-local static.
    static auto typed_handle = create__make_dual_copy_out_typed_handle();
    return typed_handle.call(primal, tangent, level, out);
}

// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _make_dual_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
    // Re-enter the dispatcher with the caller-supplied dispatch key set.
    static auto typed_handle = create__make_dual_copy_out_typed_handle();
    return typed_handle.redispatch(dispatchKeySet, primal, tangent, level, out);
}

// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
// Looks up the schema in the global dispatcher and binds it to its static signature.
static C10_NOINLINE c10::TypedOperatorHandle<view_as_real_copy_out::schema> create_view_as_real_copy_out_typed_handle() {
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher.findSchemaOrThrow(view_as_real_copy_out::name, view_as_real_copy_out::overload_name)
      .typed<view_as_real_copy_out::schema>();
}

// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_real_copy_out::call(const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_view_as_real_copy_out_typed_handle();
    return op.call(self, out);
}

// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_real_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
    
    static auto op = create_view_as_real_copy_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, out);
}

// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<view_as_complex_copy_out::schema> create_view_as_complex_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_as_complex_copy_out::name, view_as_complex_copy_out::overload_name);
  return generic_handle.typed<view_as_complex_copy_out::schema>();
}

// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_complex_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_view_as_complex_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_as_complex_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_view_as_complex_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_conj_copy_out::schema> create__conj_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(_conj_copy_out::name, _conj_copy_out::overload_name);
  return generic_handle.typed<_conj_copy_out::schema>();
}

// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conj_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create__conj_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _conj_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create__conj_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_neg_view_copy_out::schema> create__neg_view_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(_neg_view_copy_out::name, _neg_view_copy_out::overload_name);
  return generic_handle.typed<_neg_view_copy_out::schema>();
}

// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _neg_view_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create__neg_view_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _neg_view_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create__neg_view_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<as_strided_copy_out::schema> create_as_strided_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(as_strided_copy_out::name, as_strided_copy_out::overload_name);
  return generic_handle.typed<as_strided_copy_out::schema>();
}

// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & as_strided_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_as_strided_copy_out_typed_handle();
  return typed_op.call(self, size, stride, storage_offset, out);
}

// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & as_strided_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_as_strided_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, size, stride, storage_offset, out);
}

// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_sparse_broadcast_to_copy_out::schema> create__sparse_broadcast_to_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(_sparse_broadcast_to_copy_out::name, _sparse_broadcast_to_copy_out::overload_name);
  return generic_handle.typed<_sparse_broadcast_to_copy_out::schema>();
}

// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_broadcast_to_copy_out::call(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create__sparse_broadcast_to_copy_out_typed_handle();
  return typed_op.call(self, size, out);
}

// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _sparse_broadcast_to_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create__sparse_broadcast_to_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, size, out);
}

// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<diagonal_copy_out::schema> create_diagonal_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(diagonal_copy_out::name, diagonal_copy_out::overload_name);
  return generic_handle.typed<diagonal_copy_out::schema>();
}

// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_copy_out::call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_diagonal_copy_out_typed_handle();
  return typed_op.call(self, offset, dim1, dim2, out);
}

// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & diagonal_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_diagonal_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
}

// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<expand_copy_out::schema> create_expand_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(expand_copy_out::name, expand_copy_out::overload_name);
  return generic_handle.typed<expand_copy_out::schema>();
}

// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expand_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_expand_copy_out_typed_handle();
  return typed_op.call(self, size, implicit, out);
}

// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
at::Tensor & expand_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_expand_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, size, implicit, out);
}

// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<permute_copy_out::schema> create_permute_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(permute_copy_out::name, permute_copy_out::overload_name);
  return generic_handle.typed<permute_copy_out::schema>();
}

// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & permute_copy_out::call(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_permute_copy_out_typed_handle();
  return typed_op.call(self, dims, out);
}

// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & permute_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_permute_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dims, out);
}

// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_reshape_alias_copy_out::schema> create__reshape_alias_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(_reshape_alias_copy_out::name, _reshape_alias_copy_out::overload_name);
  return generic_handle.typed<_reshape_alias_copy_out::schema>();
}

// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _reshape_alias_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create__reshape_alias_copy_out_typed_handle();
  return typed_op.call(self, size, stride, out);
}

// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _reshape_alias_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create__reshape_alias_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, size, stride, out);
}

// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<select_copy_int_out::schema> create_select_copy_int_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(select_copy_int_out::name, select_copy_int_out::overload_name);
  return generic_handle.typed<select_copy_int_out::schema>();
}

// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_copy_int_out::call(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_select_copy_int_out_typed_handle();
  return typed_op.call(self, dim, index, out);
}

// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & select_copy_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_select_copy_int_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dim, index, out);
}

// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<detach_copy_out::schema> create_detach_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(detach_copy_out::name, detach_copy_out::overload_name);
  return generic_handle.typed<detach_copy_out::schema>();
}

// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & detach_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_detach_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & detach_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_detach_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<slice_copy_Tensor_out::schema> create_slice_copy_Tensor_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(slice_copy_Tensor_out::name, slice_copy_Tensor_out::overload_name);
  return generic_handle.typed<slice_copy_Tensor_out::schema>();
}

// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_copy_Tensor_out::call(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_slice_copy_Tensor_out_typed_handle();
  return typed_op.call(self, dim, start, end, step, out);
}

// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & slice_copy_Tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_slice_copy_Tensor_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dim, start, end, step, out);
}

// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_out::schema> create_squeeze_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_out::name, squeeze_copy_out::overload_name);
  return generic_handle.typed<squeeze_copy_out::schema>();
}

// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_squeeze_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_squeeze_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dim_out::schema> create_squeeze_copy_dim_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_dim_out::name, squeeze_copy_dim_out::overload_name);
  return generic_handle.typed<squeeze_copy_dim_out::schema>();
}

// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dim_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_squeeze_copy_dim_out_typed_handle();
  return typed_op.call(self, dim, out);
}

// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dim_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_squeeze_copy_dim_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<squeeze_copy_dims_out::schema> create_squeeze_copy_dims_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(squeeze_copy_dims_out::name, squeeze_copy_dims_out::overload_name);
  return generic_handle.typed<squeeze_copy_dims_out::schema>();
}

// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dims_out::call(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_squeeze_copy_dims_out_typed_handle();
  return typed_op.call(self, dim, out);
}

// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & squeeze_copy_dims_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_squeeze_copy_dims_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<t_copy_out::schema> create_t_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(t_copy_out::name, t_copy_out::overload_name);
  return generic_handle.typed<t_copy_out::schema>();
}

// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & t_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_t_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & t_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_t_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<transpose_copy_int_out::schema> create_transpose_copy_int_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(transpose_copy_int_out::name, transpose_copy_int_out::overload_name);
  return generic_handle.typed<transpose_copy_int_out::schema>();
}

// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & transpose_copy_int_out::call(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_transpose_copy_int_out_typed_handle();
  return typed_op.call(self, dim0, dim1, out);
}

// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & transpose_copy_int_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_transpose_copy_int_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dim0, dim1, out);
}

// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<unsqueeze_copy_out::schema> create_unsqueeze_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(unsqueeze_copy_out::name, unsqueeze_copy_out::overload_name);
  return generic_handle.typed<unsqueeze_copy_out::schema>();
}

// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unsqueeze_copy_out::call(const at::Tensor & self, int64_t dim, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_unsqueeze_copy_out_typed_handle();
  return typed_op.call(self, dim, out);
}

// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unsqueeze_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_unsqueeze_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dim, out);
}

// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_indices_copy_out::schema> create__indices_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(_indices_copy_out::name, _indices_copy_out::overload_name);
  return generic_handle.typed<_indices_copy_out::schema>();
}

// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create__indices_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create__indices_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_values_copy_out::schema> create__values_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(_values_copy_out::name, _values_copy_out::overload_name);
  return generic_handle.typed<_values_copy_out::schema>();
}

// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _values_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create__values_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _values_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create__values_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<indices_copy_out::schema> create_indices_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(indices_copy_out::name, indices_copy_out::overload_name);
  return generic_handle.typed<indices_copy_out::schema>();
}

// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_indices_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_indices_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<values_copy_out::schema> create_values_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(values_copy_out::name, values_copy_out::overload_name);
  return generic_handle.typed<values_copy_out::schema>();
}

// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & values_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_values_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & values_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_values_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<crow_indices_copy_out::schema> create_crow_indices_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(crow_indices_copy_out::name, crow_indices_copy_out::overload_name);
  return generic_handle.typed<crow_indices_copy_out::schema>();
}

// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & crow_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_crow_indices_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & crow_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_crow_indices_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<col_indices_copy_out::schema> create_col_indices_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(col_indices_copy_out::name, col_indices_copy_out::overload_name);
  return generic_handle.typed<col_indices_copy_out::schema>();
}

// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & col_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_col_indices_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & col_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_col_indices_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<ccol_indices_copy_out::schema> create_ccol_indices_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(ccol_indices_copy_out::name, ccol_indices_copy_out::overload_name);
  return generic_handle.typed<ccol_indices_copy_out::schema>();
}

// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ccol_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_ccol_indices_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & ccol_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_ccol_indices_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<row_indices_copy_out::schema> create_row_indices_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(row_indices_copy_out::name, row_indices_copy_out::overload_name);
  return generic_handle.typed<row_indices_copy_out::schema>();
}

// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_indices_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_row_indices_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & row_indices_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_row_indices_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<view_copy_out::schema> create_view_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_copy_out::name, view_copy_out::overload_name);
  return generic_handle.typed<view_copy_out::schema>();
}

// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_copy_out::call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_view_copy_out_typed_handle();
  return typed_op.call(self, size, out);
}

// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_view_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, size, out);
}

// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<view_copy_dtype_out::schema> create_view_copy_dtype_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(view_copy_dtype_out::name, view_copy_dtype_out::overload_name);
  return generic_handle.typed<view_copy_dtype_out::schema>();
}

// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_copy_dtype_out::call(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_view_copy_dtype_out_typed_handle();
  return typed_op.call(self, dtype, out);
}

// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & view_copy_dtype_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_view_copy_dtype_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dtype, out);
}

// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<unfold_copy_out::schema> create_unfold_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(unfold_copy_out::name, unfold_copy_out::overload_name);
  return generic_handle.typed<unfold_copy_out::schema>();
}

// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unfold_copy_out::call(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_unfold_copy_out_typed_handle();
  return typed_op.call(self, dimension, size, step, out);
}

// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & unfold_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_unfold_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, dimension, size, step, out);
}

// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<alias_copy_out::schema> create_alias_copy_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(alias_copy_out::name, alias_copy_out::overload_name);
  return generic_handle.typed<alias_copy_out::schema>();
}

// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & alias_copy_out::call(const at::Tensor & self, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_alias_copy_out_typed_handle();
  return typed_op.call(self, out);
}

// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & alias_copy_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_alias_copy_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, out);
}

// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<to_padded_tensor_out::schema> create_to_padded_tensor_out_typed_handle() {
  // Look the operator up in the global dispatcher table, then bind it to
  // its statically-known schema type.
  auto generic_handle = c10::Dispatcher::singleton()
      .findSchemaOrThrow(to_padded_tensor_out::name, to_padded_tensor_out::overload_name);
  return generic_handle.typed<to_padded_tensor_out::schema>();
}

// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_padded_tensor_out::call(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
  // Resolve the typed handle once (thread-safe local static) and enter
  // the dispatcher from the top of the key set.
  static auto typed_op = create_to_padded_tensor_out_typed_handle();
  return typed_op.call(self, padding, output_size, out);
}

// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & to_padded_tensor_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
  // Re-enter dispatch, restricted to the keys in dispatchKeySet.
  static auto typed_op = create_to_padded_tensor_out_typed_handle();
  return typed_op.redispatch(dispatchKeySet, self, padding, output_size, out);
}

// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_transformer_encoder_layer_fwd_out::schema> create__transformer_encoder_layer_fwd_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_transformer_encoder_layer_fwd_out::name, _transformer_encoder_layer_fwd_out::overload_name)
      .typed<_transformer_encoder_layer_fwd_out::schema>();
}

// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _transformer_encoder_layer_fwd_out::call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__transformer_encoder_layer_fwd_out_typed_handle();
    return handle.call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}

// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _transformer_encoder_layer_fwd_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type, at::Tensor & out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__transformer_encoder_layer_fwd_out_typed_handle();
    return handle.redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
}

// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
static C10_NOINLINE c10::TypedOperatorHandle<_native_multi_head_attention_out::schema> create__native_multi_head_attention_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_native_multi_head_attention_out::name, _native_multi_head_attention_out::overload_name)
      .typed<_native_multi_head_attention_out::schema>();
}

// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__native_multi_head_attention_out_typed_handle();
    return handle.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}

// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__native_multi_head_attention_out_typed_handle();
    return handle.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
}

// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_triton_scaled_dot_attention_out::schema> create__triton_scaled_dot_attention_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_triton_scaled_dot_attention_out::name, _triton_scaled_dot_attention_out::overload_name)
      .typed<_triton_scaled_dot_attention_out::schema>();
}

// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_scaled_dot_attention_out::call(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__triton_scaled_dot_attention_out_typed_handle();
    return handle.call(q, k, v, dropout_p, out);
}

// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_scaled_dot_attention_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__triton_scaled_dot_attention_out_typed_handle();
    return handle.redispatch(dispatchKeySet, q, k, v, dropout_p, out);
}

// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_triton_multi_head_attention_out::schema> create__triton_multi_head_attention_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_triton_multi_head_attention_out::name, _triton_multi_head_attention_out::overload_name)
      .typed<_triton_multi_head_attention_out::schema>();
}

// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_multi_head_attention_out::call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__triton_multi_head_attention_out_typed_handle();
    return handle.call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}

// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _triton_multi_head_attention_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, at::Tensor & out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__triton_multi_head_attention_out_typed_handle();
    return handle.redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
}

// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
static C10_NOINLINE c10::TypedOperatorHandle<_foobar_out::schema> create__foobar_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_foobar_out::name, _foobar_out::overload_name)
      .typed<_foobar_out::schema>();
}

// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _foobar_out::call(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__foobar_out_typed_handle();
    return handle.call(self, arg1, arg2, arg3, out);
}

// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
at::Tensor & _foobar_out::redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__foobar_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, arg1, arg2, arg3, out);
}

// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_out::schema> create__fused_adam_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adam_out::name, _fused_adam_out::overload_name)
      .typed<_fused_adam_out::schema>();
}

// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adam_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adam_out_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adam_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adam_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam::schema> create__fused_adam_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adam::name, _fused_adam::overload_name)
      .typed<_fused_adam::schema>();
}

// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adam_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adam_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_tensor_lr_out::schema> create__fused_adam_tensor_lr_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adam_tensor_lr_out::name, _fused_adam_tensor_lr_out::overload_name)
      .typed<_fused_adam_tensor_lr_out::schema>();
}

// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adam_tensor_lr_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adam_tensor_lr_out_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adam_tensor_lr_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adam_tensor_lr_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adam_tensor_lr::schema> create__fused_adam_tensor_lr_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adam_tensor_lr::name, _fused_adam_tensor_lr::overload_name)
      .typed<_fused_adam_tensor_lr::schema>();
}

// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adam_tensor_lr_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adam_tensor_lr_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw_out::schema> create__fused_adamw_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adamw_out::name, _fused_adamw_out::overload_name)
      .typed<_fused_adamw_out::schema>();
}

// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adamw_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adamw_out_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adamw_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adamw_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw::schema> create__fused_adamw_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adamw::name, _fused_adamw::overload_name)
      .typed<_fused_adamw::schema>();
}

// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adamw_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adamw_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw_tensor_lr_out::schema> create__fused_adamw_tensor_lr_out_typed_handle() {
  // Look up the registered schema in the global dispatcher and bind it to the
  // statically-known signature; findSchemaOrThrow throws if unregistered.
  auto& dispatcher = c10::Dispatcher::singleton();
  return dispatcher
      .findSchemaOrThrow(_fused_adamw_tensor_lr_out::name, _fused_adamw_tensor_lr_out::overload_name)
      .typed<_fused_adamw_tensor_lr_out::schema>();
}

// aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adamw_tensor_lr_out::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Handle is resolved once (thread-safe static init) and cached thereafter.
    static auto handle = create__fused_adamw_tensor_lr_out_typed_handle();
    return handle.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adamw.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
void _fused_adamw_tensor_lr_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Re-enters the dispatcher with a caller-supplied dispatch key set.
    static auto handle = create__fused_adamw_tensor_lr_out_typed_handle();
    return handle.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adamw_tensor_lr::schema> create__fused_adamw_tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adamw_tensor_lr::name, _fused_adamw_tensor_lr::overload_name)
      .typed<_fused_adamw_tensor_lr::schema>();
}

// aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
// Fully-dispatched entry point for the functional variant (returns new tensor lists).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_adamw_tensor_lr_typed_handle();
    return op.call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_adamw.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle as ::call.
    static auto op = create__fused_adamw_tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
}

// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_out::schema> create__fused_sgd_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_out::name, _fused_sgd_out::overload_name)
      .typed<_fused_sgd_out::schema>();
}

// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Fully-dispatched entry point for the out-variant of _fused_sgd (scalar lr).
void _fused_sgd_out::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_sgd_out_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
void _fused_sgd_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Same cached handle as ::call.
    static auto op = create__fused_sgd_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd::schema> create__fused_sgd_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd::name, _fused_sgd::overload_name)
      .typed<_fused_sgd::schema>();
}

// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Fully-dispatched entry point for the functional variant (returns new tensor lists).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_sgd_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, float lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle as ::call.
    static auto op = create__fused_sgd_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_tensor_lr_out::schema> create__fused_sgd_tensor_lr_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_tensor_lr_out::name, _fused_sgd_tensor_lr_out::overload_name)
      .typed<_fused_sgd_tensor_lr_out::schema>();
}

// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Fully-dispatched entry point for the out-variant of _fused_sgd with a tensor lr.
void _fused_sgd_tensor_lr_out::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_sgd_tensor_lr_out_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
void _fused_sgd_tensor_lr_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Same cached handle as ::call.
    static auto op = create__fused_sgd_tensor_lr_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
}

// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_sgd_tensor_lr::schema> create__fused_sgd_tensor_lr_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_sgd_tensor_lr::name, _fused_sgd_tensor_lr::overload_name)
      .typed<_fused_sgd_tensor_lr::schema>();
}

// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Fully-dispatched entry point for the functional variant (returns new tensor lists).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_tensor_lr::call(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_sgd_tensor_lr_typed_handle();
    return op.call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_sgd.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] momentum_buffer_list, *, float weight_decay, float momentum, Tensor lr, float dampening, bool nesterov, bool maximize, bool is_first_step, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] momentum_buffer_list_out)
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_tensor_lr::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle as ::call.
    static auto op = create__fused_sgd_tensor_lr_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
}

// aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adagrad_out::schema> create__fused_adagrad_out_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adagrad_out::name, _fused_adagrad_out::overload_name)
      .typed<_fused_adagrad_out::schema>();
}

// aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Fully-dispatched entry point for the out-variant of _fused_adagrad.
void _fused_adagrad_out::call(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_adagrad_out_typed_handle();
    return op.call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adagrad.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] state_sums, Tensor(d!)[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
void _fused_adagrad_out::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
    // Same cached handle as ::call.
    static auto op = create__fused_adagrad_out_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
}

// aten::_fused_adagrad(Tensor[] self, Tensor[] grads, Tensor[] state_sums, Tensor[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] state_sums_out, Tensor[] state_steps_out)
// Resolves the registered schema for this operator via the global Dispatcher
// singleton; findSchemaOrThrow raises if the schema has not been registered.
static C10_NOINLINE c10::TypedOperatorHandle<_fused_adagrad::schema> create__fused_adagrad_typed_handle() {
  return c10::Dispatcher::singleton()
      .findSchemaOrThrow(_fused_adagrad::name, _fused_adagrad::overload_name)
      .typed<_fused_adagrad::schema>();
}

// aten::_fused_adagrad(Tensor[] self, Tensor[] grads, Tensor[] state_sums, Tensor[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] state_sums_out, Tensor[] state_steps_out)
// Fully-dispatched entry point for the functional variant (returns new tensor lists).
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adagrad::call(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Schema lookup runs once (function-local static); the handle is reused thereafter.
    static auto op = create__fused_adagrad_typed_handle();
    return op.call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
}

// aten::_fused_adagrad(Tensor[] self, Tensor[] grads, Tensor[] state_sums, Tensor[] state_steps, *, float lr, float lr_decay, float weight_decay, float eps, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] state_sums_out, Tensor[] state_steps_out)
// Redispatch variant: passes the caller-provided DispatchKeySet through to the
// cached operator handle instead of computing one from the arguments.
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adagrad::redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
    // Same cached handle as ::call.
    static auto op = create__fused_adagrad_typed_handle();
    return op.redispatch(dispatchKeySet, self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
}

}} // namespace at::_ops
